column                                      type             range / classes
hexsha                                      stringlengths    40 .. 40
size                                        int64            5 .. 2.06M
ext                                         stringclasses    11 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3 .. 251
max_stars_repo_name                         stringlengths    4 .. 130
max_stars_repo_head_hexsha                  stringlengths    40 .. 78
max_stars_repo_licenses                     sequencelengths  1 .. 10
max_stars_count                             int64            1 .. 191k
max_stars_repo_stars_event_min_datetime     stringlengths    24 .. 24
max_stars_repo_stars_event_max_datetime     stringlengths    24 .. 24
max_issues_repo_path                        stringlengths    3 .. 251
max_issues_repo_name                        stringlengths    4 .. 130
max_issues_repo_head_hexsha                 stringlengths    40 .. 78
max_issues_repo_licenses                    sequencelengths  1 .. 10
max_issues_count                            int64            1 .. 116k
max_issues_repo_issues_event_min_datetime   stringlengths    24 .. 24
max_issues_repo_issues_event_max_datetime   stringlengths    24 .. 24
max_forks_repo_path                         stringlengths    3 .. 251
max_forks_repo_name                         stringlengths    4 .. 130
max_forks_repo_head_hexsha                  stringlengths    40 .. 78
max_forks_repo_licenses                     sequencelengths  1 .. 10
max_forks_count                             int64            1 .. 105k
max_forks_repo_forks_event_min_datetime     stringlengths    24 .. 24
max_forks_repo_forks_event_max_datetime     stringlengths    24 .. 24
content                                     stringlengths    1 .. 1.05M
avg_line_length                             float64          1 .. 1.02M
max_line_length                             int64            3 .. 1.04M
alphanum_fraction                           float64          0 .. 1
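The records that follow are per-file rows dumped one field per line in the column order above, for what appears to be a Python-only slice of a large source-code dataset. As a rough, self-contained illustration only (the field names come from the schema above, while the helper, its thresholds, and the idea of filtering are assumptions, not part of the dump), a dumped row can be treated as a plain mapping and screened on these columns:

from typing import Any, Dict

def keep_row(row: Dict[str, Any],
             min_stars: int = 0,
             max_line_len: int = 1000,
             min_alnum: float = 0.25) -> bool:
    # Hypothetical quality filter over the schema columns listed above;
    # "null" values in the dump are represented as None here.
    return (
        row.get("lang") == "Python"
        and (row.get("max_stars_count") or 0) >= min_stars
        and (row.get("max_line_length") or 0) <= max_line_len
        and (row.get("alphanum_fraction") or 0.0) >= min_alnum
    )

# First record of the dump, reduced to a few of its columns.
row: Dict[str, Any] = {
    "hexsha": "76d7b41b848ee61c162f4bba8b9bb68353d44d98",
    "ext": "py",
    "lang": "Python",
    "max_stars_repo_name": "ivanprytula/dj_demo_app",
    "max_stars_repo_licenses": ["MIT"],
    "max_stars_count": None,
    "max_line_length": 44,
    "alphanum_fraction": 0.733333,
}
print(keep_row(row))  # True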
76d7b41b848ee61c162f4bba8b9bb68353d44d98
45
py
Python
dj_twitter_clone_app/core_config/settings/staging.py
ivanprytula/dj_demo_app
49ca506b22d3d99608e192b28787e185b39d3c24
[ "MIT" ]
null
null
null
dj_twitter_clone_app/core_config/settings/staging.py
ivanprytula/dj_demo_app
49ca506b22d3d99608e192b28787e185b39d3c24
[ "MIT" ]
null
null
null
dj_twitter_clone_app/core_config/settings/staging.py
ivanprytula/dj_demo_app
49ca506b22d3d99608e192b28787e185b39d3c24
[ "MIT" ]
null
null
null
"""Placeholder/template for staging envs."""
22.5
44
0.733333
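The trailing avg_line_length, max_line_length, and alphanum_fraction columns are derived from content. The dump does not state the exact definitions, but a minimal sketch that reproduces this first record's figures (size 45, avg_line_length 22.5, max_line_length 44, alphanum_fraction 0.733333) under the assumption that the file ends with a newline is:

def line_stats(content: str):
    # Treat the file as newline-separated segments; a trailing newline
    # therefore contributes an empty final segment.
    lines = content.split("\n")
    avg_line_length = len(content) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction

print(line_stats('"""Placeholder/template for staging envs."""\n'))
# -> (22.5, 44, 0.7333333333333333)

Here avg_line_length is taken as total character count divided by the number of segments, which matches 45 / 2 = 22.5 for this file; other definitions would give slightly different values.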
76d842d33f2db656494e8fb701c74c89d920202e
182
py
Python
tests/test_command.py
vandana-11/cognito
4f92229511b265578def8e34d30575292070e584
[ "BSD-3-Clause" ]
null
null
null
tests/test_command.py
vandana-11/cognito
4f92229511b265578def8e34d30575292070e584
[ "BSD-3-Clause" ]
null
null
null
tests/test_command.py
vandana-11/cognito
4f92229511b265578def8e34d30575292070e584
[ "BSD-3-Clause" ]
null
null
null
from cognito.check import Check
from cognito.table import Table
import os
import pytest
import pandas as pd
import numpy as np
from os import path
from sklearn import preprocessing
20.222222
33
0.82967
76da4334b5fdeaaf4557e3c74b65d210265f77b8
14,585
py
Python
report_writer/report_writer.py
DoubleBridges/door-order-parser
cd652922006d84a34143ded325e79d141343521d
[ "MIT" ]
null
null
null
report_writer/report_writer.py
DoubleBridges/door-order-parser
cd652922006d84a34143ded325e79d141343521d
[ "MIT" ]
null
null
null
report_writer/report_writer.py
DoubleBridges/door-order-parser
cd652922006d84a34143ded325e79d141343521d
[ "MIT" ]
null
null
null
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Spacer
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
from reportlab.platypus.flowables import Flowable
37.590206
134
0.53459
76dba06432c777d52082f512eea09a2187e28998
201
py
Python
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
s2t2/tweet-analyzer-py
0a398fc47101a2d602d8c4116c970f1076a58f27
[ "MIT" ]
5
2020-04-02T12:03:57.000Z
2020-10-18T19:29:15.000Z
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
s2t2/tweet-analyzer-py
0a398fc47101a2d602d8c4116c970f1076a58f27
[ "MIT" ]
22
2020-03-31T02:00:34.000Z
2021-06-30T17:59:01.000Z
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
s2t2/tweet-analyzer-py
0a398fc47101a2d602d8c4116c970f1076a58f27
[ "MIT" ]
3
2020-04-04T16:08:08.000Z
2020-10-20T01:32:46.000Z
from app.bq_service import BigQueryService

if __name__ == "__main__":
    bq_service = BigQueryService()
    bq_service.migrate_daily_bot_probabilities_table()
    print("MIGRATION SUCCESSFUL!")
16.75
54
0.756219
76dbfabe1368ceb4eba242e1e280877abf784832
12,063
py
Python
colosseum/mdps/minigrid_doorkey/minigrid_doorkey.py
MichelangeloConserva/Colosseum
b0711fd9ce75520deb74cda75c148984a8e4152f
[ "MIT" ]
null
null
null
colosseum/mdps/minigrid_doorkey/minigrid_doorkey.py
MichelangeloConserva/Colosseum
b0711fd9ce75520deb74cda75c148984a8e4152f
[ "MIT" ]
null
null
null
colosseum/mdps/minigrid_doorkey/minigrid_doorkey.py
MichelangeloConserva/Colosseum
b0711fd9ce75520deb74cda75c148984a8e4152f
[ "MIT" ]
null
null
null
from copy import deepcopy from dataclasses import asdict, dataclass from enum import IntEnum from colosseum.utils.random_vars import deterministic, get_dist try: from functools import cached_property except: from backports.cached_property import cached_property from typing import Any, Dict, List, Tuple, Type, Union import numpy as np from scipy.stats import beta, rv_continuous from colosseum.mdps import MDP from colosseum.mdps.base_mdp import NextStateSampler from colosseum.mdps.minigrid_rooms.continuous.mdp import MiniGridRoomsContinuous from colosseum.utils.mdps import check_distributions def _calculate_next_nodes_prms( self, node: MiniGridDoorKeyNode, action: int ) -> Tuple[Tuple[dict, float], ...]: newnode_prms = deepcopy(asdict(node)) if action == MiniGridDoorKeyAction.TurnRight: newnode_prms["Dir"] = (node.Dir + 1) % 4 if action == MiniGridDoorKeyAction.TurnLeft: newnode_prms["Dir"] = (node.Dir - 1) % 4 if action == MiniGridDoorKeyAction.MoveForward: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X, node.Y + 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord = node.X + 1, node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y - 1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y if next_coord in self.coordinates_available or ( node.IsDoorOpened and next_coord == self.door_position ): newnode_prms["X"], newnode_prms["Y"] = next_coord if action == MiniGridDoorKeyAction.PickObject: if node.X == node.XKey and node.Y == node.YKey: newnode_prms["XKey"] = newnode_prms["YKey"] = -1 if node.XKey == -1 and not node.IsDoorOpened: if action == MiniGridDoorKeyAction.DropObject: newnode_prms["XKey"] = node.X newnode_prms["YKey"] = node.Y if action == MiniGridDoorKeyAction.UseObject: if node.Dir == MiniGridDoorKeyDirection.UP: next_coord = (node.X, node.Y + 1) if node.Dir == MiniGridDoorKeyDirection.RIGHT: next_coord = node.X + 1, node.Y if node.Dir == MiniGridDoorKeyDirection.DOWN: next_coord = node.X, node.Y - 1 if node.Dir == MiniGridDoorKeyDirection.LEFT: next_coord = node.X - 1, node.Y if next_coord == self.door_position: newnode_prms["IsDoorOpened"] = True return ((newnode_prms, 1.0),) def _calculate_reward_distribution( self, node: Any, action: IntEnum, next_node: Any ) -> rv_continuous: return ( self.optimal_distribution if next_node.X == self.goal_position[0] and next_node.Y == self.goal_position[1] else self.other_distribution ) def _check_input_parameters(self): super(MiniGridDoorKeyMDP, self)._check_input_parameters() assert self.size >= 3 check_distributions( [ self.optimal_distribution, self.other_distribution, ], self.make_reward_stochastic, ) def _instantiate_starting_node_sampler(self) -> NextStateSampler: # noinspection PyAttributeOutsideInit self.wall_position = self._rng.randint(self.size - 2) + 1 # noinspection PyAttributeOutsideInit self.is_wall_horizontal = self._rng.rand() > 0.5 if self.is_wall_horizontal: self.door_position = self._rng.randint(self.size), self.wall_position else: self.door_position = self.wall_position, self._rng.randint(self.size) self.is_goal_before = self._rng.rand() > 0.5 coords = MiniGridRoomsContinuous.get_positions_coords_in_room(self.size, (0, 0)) goal_positions = [] starting_positions = [] for i, j in coords.ravel(): if ( i < self.wall_position if self.is_goal_before else i > self.wall_position ): goal_positions.append((j, i) if self.is_wall_horizontal else (i, j)) elif ( i > self.wall_position if self.is_goal_before else i < self.wall_position ): 
starting_positions.append((j, i) if self.is_wall_horizontal else (i, j)) possible_starting_positions = deepcopy(starting_positions) self._rng.shuffle(goal_positions) self.goal_position = goal_positions[0] self._rng.shuffle(starting_positions) self.start_key_position = starting_positions.pop(0) starting_positions = [ (x, y, dir) for x, y in starting_positions for dir in MiniGridDoorKeyDirection ] assert self.n_starting_states < len(starting_positions) self._possible_starting_nodes = [ MiniGridDoorKeyNode( x, y, dir.value, *self.start_key_position, False, ) for x, y, dir in starting_positions ] return NextStateSampler( next_states=self._possible_starting_nodes[: self.n_starting_states], probs=[1 / self.n_starting_states for _ in range(self.n_starting_states)], seed=self._next_seed(), ) def calc_grid_repr(self, node: Any) -> np.array: grid_size = self.size door_position = self.door_position wall_position = self.wall_position is_wall_horizontal = self.is_wall_horizontal grid = np.zeros((grid_size, grid_size), dtype=str) grid[:, :] = " " grid[self.goal_position[1], self.goal_position[0]] = "G" if self.cur_node.XKey != -1: grid[self.cur_node.YKey, self.cur_node.XKey] = "K" for i in range(grid_size): if not is_wall_horizontal: grid[i, wall_position] = "W_en" else: grid[wall_position, i] = "W_en" grid[door_position[1], door_position[0]] = ( "O" if self.cur_node.IsDoorOpened else "C" ) if self.cur_node.Dir == MiniGridDoorKeyDirection.UP: grid[self.cur_node.Y, self.cur_node.X] = "^" elif self.cur_node.Dir == MiniGridDoorKeyDirection.RIGHT: grid[self.cur_node.Y, self.cur_node.X] = ">" elif self.cur_node.Dir == MiniGridDoorKeyDirection.DOWN: grid[self.cur_node.Y, self.cur_node.X] = "v" elif self.cur_node.Dir == MiniGridDoorKeyDirection.LEFT: grid[self.cur_node.Y, self.cur_node.X] = "<" return grid[::-1, :]
36.554545
151
0.608555
76dc3dcc93cf6f1c271c8e612a3e064f4f02ee56
3,258
py
Python
tests/bugs/core_6266_test.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
tests/bugs/core_6266_test.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
tests/bugs/core_6266_test.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
#coding:utf-8 # # id: bugs.core_6266 # title: Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments # decription: # Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause. # Confirmed bug on 3.0.6.33271. # Checked on 3.0.6.33272 (SS/CS) - works fine. # 22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build. # # tracker_id: CORE-6266 # min_versions: ['3.0.0'] # versions: 3.0 # qmid: None import pytest from firebird.qa import db_factory, isql_act, Action # version: 3.0 # resources: None substitutions_1 = [] init_script_1 = """""" db_1 = db_factory(sql_dialect=3, init=init_script_1) # test_script_1 #--- # import os # import sys # import time # import fdb # # ATT_CNT=5 # ATT_DELAY=1 # # os.environ["ISC_USER"] = user_name # os.environ["ISC_PASSWORD"] = user_password # # db_conn.close() # # con_list={} # for i in range(0, ATT_CNT): # if i > 0: # time.sleep( ATT_DELAY ) # # c = fdb.connect(dsn = dsn) # a = c.attachment_id # con_list[ i ] = (a, c) # # print('created attachment ', (a,c) ) # # con_admin = con_list[0][1] # # #print(con_admin.firebird_version) # # # this removes ALL connections --> should NOT be used for reproducing ticket issue: # #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp') # # # this removes ALL connections --> should NOT be used for reproducing ticket issue: # #con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection order by mon$timestamp') # # # This DOES NOT remove all attachments (only 'last' in order of timestamp), but # # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection': # con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp') # # con_admin.commit() # # cur_admin = con_admin.cursor() # cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' ) # i=0 # for r in cur_admin: # print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' ) # i += 1 # print('Number of attachments that remains alive: ',i) # # cur_admin.close() # # #print('Final cleanup before quit from Python.') # # for k,v in sorted( con_list.items() ): # #print('attempt to close attachment ', v[0] ) # try: # v[1].close() # #print('done.') # except Exception as e: # pass # #print('Got exception:', sys.exc_info()[0]) # #print(e[0]) # # #--- #act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1) expected_stdout_1 = """ Number of attachments that remains alive: 0 """
31.028571
170
0.645795
76de48d1a553599d42928e5621ab909ebe023773
1,276
py
Python
scripts/senate_crawler.py
tompsh/tompsh.github.io
3283ee2de46730adf14ef4f6bd2963b345500562
[ "BSD-2-Clause" ]
null
null
null
scripts/senate_crawler.py
tompsh/tompsh.github.io
3283ee2de46730adf14ef4f6bd2963b345500562
[ "BSD-2-Clause" ]
null
null
null
scripts/senate_crawler.py
tompsh/tompsh.github.io
3283ee2de46730adf14ef4f6bd2963b345500562
[ "BSD-2-Clause" ]
null
null
null
from bs4 import BeautifulSoup
import logging
import pandas as pd
import csv
import re
import requests
from urllib.parse import urljoin

logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
27.73913
87
0.590125
76de86f0d428be6f17b7c61c98178730bcc214cf
4,173
py
Python
backend-project/small_eod/autocomplete/tests/test_views.py
merito/small_eod
ab19b82f374cd7c4b21d8f9412657dbe7f7f03e2
[ "MIT" ]
64
2019-12-30T11:24:03.000Z
2021-06-24T01:04:56.000Z
backend-project/small_eod/autocomplete/tests/test_views.py
merito/small_eod
ab19b82f374cd7c4b21d8f9412657dbe7f7f03e2
[ "MIT" ]
465
2018-06-13T21:43:43.000Z
2022-01-04T23:33:56.000Z
backend-project/small_eod/autocomplete/tests/test_views.py
merito/small_eod
ab19b82f374cd7c4b21d8f9412657dbe7f7f03e2
[ "MIT" ]
72
2018-12-02T19:47:03.000Z
2022-01-04T22:54:49.000Z
from test_plus.test import TestCase

from ...administrative_units.factories import AdministrativeUnitFactory
from ...cases.factories import CaseFactory
from ...channels.factories import ChannelFactory
from ...events.factories import EventFactory
from ...features.factories import FeatureFactory, FeatureOptionFactory
from ...generic.tests.test_views import ReadOnlyViewSetMixin
from ...institutions.factories import InstitutionFactory
from ...letters.factories import DocumentTypeFactory, ReferenceNumberFactory
from ...search.tests.mixins import SearchQueryMixin
from ...tags.factories import TagFactory
from ...users.factories import UserFactory
32.1
88
0.749101
76dead288194d6b5e50fd079f21d614687299cb8
1,085
py
Python
src/lennybot/model/plan.py
raynigon/lenny-bot
d906a25dc28d9102829d3d6265d300f65406db02
[ "Apache-2.0" ]
1
2021-12-15T14:03:54.000Z
2021-12-15T14:03:54.000Z
src/lennybot/model/plan.py
raynigon/lenny-bot
d906a25dc28d9102829d3d6265d300f65406db02
[ "Apache-2.0" ]
1
2021-12-15T14:02:57.000Z
2021-12-15T17:44:26.000Z
src/lennybot/model/plan.py
raynigon/lennybot
79bee9a834f885a0da2484b239cf6efaf9cb9e4e
[ "Apache-2.0" ]
null
null
null
from typing import Any, List

from ..actions.iaction import IAction
from ..model.state import LennyBotState
25.232558
77
0.621198
76ded3c51388324a8e665394e6561d69d52c808d
6,101
py
Python
laceworksdk/api/container_registries.py
kiddinn/python-sdk
23a33313f97337fddea155bcb19c8d5270fc8013
[ "MIT" ]
10
2021-03-20T18:12:16.000Z
2022-02-14T21:33:23.000Z
laceworksdk/api/container_registries.py
kiddinn/python-sdk
23a33313f97337fddea155bcb19c8d5270fc8013
[ "MIT" ]
10
2021-02-22T23:31:32.000Z
2022-03-25T14:11:27.000Z
laceworksdk/api/container_registries.py
kiddinn/python-sdk
23a33313f97337fddea155bcb19c8d5270fc8013
[ "MIT" ]
7
2021-06-18T18:17:12.000Z
2022-03-25T13:52:14.000Z
# -*- coding: utf-8 -*-
"""
Lacework Container Registries API wrapper.
"""

import logging

logger = logging.getLogger(__name__)
28.914692
97
0.591051
76dfdcc4b341cedf794e7489e27908f2ae58e24b
10,024
py
Python
mllib/nlp/seq2seq.py
pmaxit/dlnotebooks
5e5a161bbd9d0753850029be29e1488b8858ecd5
[ "Apache-2.0" ]
null
null
null
mllib/nlp/seq2seq.py
pmaxit/dlnotebooks
5e5a161bbd9d0753850029be29e1488b8858ecd5
[ "Apache-2.0" ]
null
null
null
mllib/nlp/seq2seq.py
pmaxit/dlnotebooks
5e5a161bbd9d0753850029be29e1488b8858ecd5
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_seq2seq.ipynb (unless otherwise specified).

__all__ = ['Encoder', 'NewDecoder', 'Seq2Seq']

# Cell
from torch import nn
from torch import optim
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

# Cell

# Cell

# Cell
import random
import pytorch_lightning as pl
import pytorch_lightning.metrics.functional as plfunc
from pytorch_lightning.loggers import TensorBoardLogger

# Cell
32.025559
120
0.621409
76dff496b7787e808a82fccd90d499cb2d9e785d
1,994
py
Python
tests/flows/test_consent.py
mrkday/SATOSA
43fd13273d7633b1d496d9c9aaef97c472ebd448
[ "Apache-2.0" ]
92
2017-11-08T08:01:27.000Z
2022-03-14T09:44:09.000Z
tests/flows/test_consent.py
mrkday/SATOSA
43fd13273d7633b1d496d9c9aaef97c472ebd448
[ "Apache-2.0" ]
155
2017-10-31T15:11:06.000Z
2022-03-11T16:59:23.000Z
tests/flows/test_consent.py
mrkday/SATOSA
43fd13273d7633b1d496d9c9aaef97c472ebd448
[ "Apache-2.0" ]
73
2017-11-05T13:53:40.000Z
2022-03-23T15:34:00.000Z
import json
import re

import responses
from werkzeug.test import Client
from werkzeug.wrappers import Response

from satosa.proxy_server import make_app
from satosa.satosa_config import SATOSAConfig
41.541667
112
0.660481
76e06c68d3769fb919b634d12c79af9d79a056b9
18,072
py
Python
qnarre/models/transfo_xl.py
quantapix/qnarre.com
f51d5945c20ef8182c4aa11f1b407d064c190c70
[ "MIT" ]
null
null
null
qnarre/models/transfo_xl.py
quantapix/qnarre.com
f51d5945c20ef8182c4aa11f1b407d064c190c70
[ "MIT" ]
null
null
null
qnarre/models/transfo_xl.py
quantapix/qnarre.com
f51d5945c20ef8182c4aa11f1b407d064c190c70
[ "MIT" ]
null
null
null
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# https://arxiv.org/abs/1901.02860
# https://github.com/kimiyoung/transformer-xl

import torch

from torch import nn
from torch.nn import functional as F
from transformers.utils import logging

from .. import core as qc
from ..core import utils as qu
from ..core import forward as qf
from ..core import output as qo
from ..core.embed import Adaptive, Positional
from ..core.ffnet import Positionwise
from ..prep.config.transfo_xl import PreTrained

log = logging.get_logger(__name__)
42.224299
98
0.529714
76e0ef3752aa275816b6ecc85b1a2c5f0647c59d
3,429
py
Python
src/align/face_align_celeba.py
Dou-Yu-xuan/pykinship
f81f6667fa08a08fe726736d05476168b2a3e2f0
[ "MIT" ]
12
2020-02-19T02:50:49.000Z
2022-03-31T19:39:35.000Z
src/align/face_align_celeba.py
Dou-Yu-xuan/pykinship
f81f6667fa08a08fe726736d05476168b2a3e2f0
[ "MIT" ]
68
2020-03-23T00:07:28.000Z
2022-03-28T10:02:16.000Z
src/align/face_align_celeba.py
Dou-Yu-xuan/pykinship
f81f6667fa08a08fe726736d05476168b2a3e2f0
[ "MIT" ]
3
2020-02-11T19:07:08.000Z
2020-11-04T18:48:00.000Z
import argparse import glob import os import pickle from pathlib import Path import numpy as np from PIL import Image from tqdm import tqdm from src.align.align_trans import get_reference_facial_points, warp_and_crop_face # sys.path.append("../../") from src.align.detector import detect_faces if __name__ == "__main__": parser = argparse.ArgumentParser(description="face alignment") parser.add_argument( "-source_root", "--source_root", help="specify your source dir", default="../../data/fiw-videos/new-processed/", type=str, ) parser.add_argument( "-dest_root", "--dest_root", help="specify your destination dir", default="../../data/fiw-videos/new-processed/", type=str, ) parser.add_argument( "-crop_size", "--crop_size", help="specify size of aligned faces, align and crop with padding", default=112, type=int, ) args = parser.parse_args() source_root = args.source_root # specify your source dir dest_root = args.dest_root # specify your destination dir crop_size = ( args.crop_size ) # specify size of aligned faces, align and crop with padding scale = crop_size / 112.0 reference = get_reference_facial_points(default_square=True) * scale cwd = os.getcwd() # delete '.DS_Store' existed in the source_root os.chdir(source_root) os.system("find . -name '*.DS_Store' -type f -delete") os.chdir(cwd) imfiles = [ f for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*") if Path(f).is_file() ] # images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles} meta = {} # for subfolder in tqdm(os.listdir(source_root)): for imfile in tqdm(imfiles): ref = imfile.replace(source_root, "") print("Processing\t{}".format(imfile)) img = Image.open(imfile) try: # Handle exception bbs, landmarks = detect_faces(img) except Exception: print("{} is discarded due to exception!".format(imfile)) continue ref = imfile.replace(source_root, "") ndetections = len(landmarks) if ( ndetections == 0 ): # If the landmarks cannot be detected, the img will be discarded print("{} is discarded due to non-detected landmarks!".format(imfile)) meta[ref] = [] continue li_meta = [] for i in range(ndetections): im_meta = {} im_meta["face"] = i im_meta["landmarks"] = landmarks[i] im_meta["bb"] = bbs[i] facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)] warped_face = warp_and_crop_face( np.array(img), facial5points, reference, crop_size=(crop_size, crop_size), ) img_warped = Image.fromarray(warped_face) image_name = imfile.replace("images", "cropped").replace( ".jpg", "-{:02d}.jpg".format(i) ) # im_meta['ref'] = "/".join(image_name.split('/')[-5:]) img_warped.save(image_name) li_meta.append(im_meta) meta[ref] = li_meta with open(source_root + "cropped-meta.pkl", "wb") as f: pickle.dump(meta, f)
32.657143
90
0.585885
76e2fbbb9481d029109c5c955ed7a3309fc9c83a
117
py
Python
extract.py
rmalav15/voice-data-extract
e021428afe2706cae0e5339e96bba7f8b033117d
[ "MIT" ]
null
null
null
extract.py
rmalav15/voice-data-extract
e021428afe2706cae0e5339e96bba7f8b033117d
[ "MIT" ]
null
null
null
extract.py
rmalav15/voice-data-extract
e021428afe2706cae0e5339e96bba7f8b033117d
[ "MIT" ]
null
null
null
from srtvoiceext import extract

if __name__ == '__main__':
    ext = extract('video.mkv', 'subtitles.srt', 'outdir')
29.25
57
0.700855
76e301801e70d562cc3a1d9777a610e89dc8d94b
632
py
Python
bacon/readonly_collections.py
aholkner/bacon
edf3810dcb211942d392a8637945871399b0650d
[ "MIT" ]
37
2015-01-29T17:42:11.000Z
2021-12-14T22:11:33.000Z
bacon/readonly_collections.py
aholkner/bacon
edf3810dcb211942d392a8637945871399b0650d
[ "MIT" ]
3
2015-08-13T17:38:05.000Z
2020-09-25T17:21:31.000Z
bacon/readonly_collections.py
aholkner/bacon
edf3810dcb211942d392a8637945871399b0650d
[ "MIT" ]
7
2015-02-12T17:54:35.000Z
2022-01-31T14:50:09.000Z
import collections
25.28
53
0.642405
76e38e9aaa4e8905b66b235b95aefae36be7dc3f
25,699
py
Python
rpg_game/gui.py
ricott1/twissh
8cbed5eef8e3326a92855cdc2cfea3f4ce214d8d
[ "MIT" ]
null
null
null
rpg_game/gui.py
ricott1/twissh
8cbed5eef8e3326a92855cdc2cfea3f4ce214d8d
[ "MIT" ]
null
null
null
rpg_game/gui.py
ricott1/twissh
8cbed5eef8e3326a92855cdc2cfea3f4ce214d8d
[ "MIT" ]
null
null
null
# encoding: utf-8 import urwid import time, os, copy from rpg_game.utils import log, mod, distance from rpg_game.constants import * from urwid import raw_display SIZE = lambda scr=raw_display.Screen(): scr.get_cols_rows() MIN_HEADER_HEIGHT = 3 MAX_MENU_WIDTH = 48 FOOTER_HEIGHT = 4 PALETTE = [ ("line", 'black', 'white', "standout"), ("top","white","black"), ("frame","white","white"), ("player", "light green", "black"), ("other", "light blue", "black"), ("monster", "dark red", "black"), ("fatigued", "dark red", "white", "standout"), ("reversed", "standout", ""), ("common","white","black"), ("common_line","black","white","standout"), ("uncommon","dark cyan","black"), ("uncommon_line","dark cyan","white","standout"), ("rare","yellow","black"), ("rare_line","yellow","white","standout"), ("unique","light magenta","black"), ("unique_line","light magenta","white","standout"), ("set","light green","black"), ("set_line","light green","white","standout"), ("normal","white","black"), ("positive","light green","black"), ("negative","dark red","black"), ("white","white","black"), ("disabled","dark red","black"), ("red","dark red","black"), ("green","light green","black"), ("yellow","yellow","black"), ("brown","brown","black"), ("white_line","black","white", "standout"), ("red_line","dark red","white", "standout"), ("green_line","light green","white", "standout"), ("yellow_line","yellow","white", "standout"), ("cyan","light cyan","black"), ("cyan_line","light cyan","white", "standout"), ("name","white","black"), ] class GUI(UiFrame): # def exit(self): # self.disconnect() # self.mind.disconnect()#should use dispatch event class IntroFrame(UiFrame): class GameFrame(UiFrame): def update_footer(self): _size = 0 inv_btns = [] for i, obj in self.player.inventory.content.items(): if obj: _size += 1 if obj.is_equipment and obj.is_equipped: _marker = ["[", (obj.color, f"{obj.marker[0]}"), "]"] elif obj.is_equipment and not obj.is_equipped: _marker = ["]", (obj.color, f"{obj.marker[0]}"), "["] elif obj.is_consumable: _marker = ["(", (obj.color, f"{obj.marker[0]}"), ")"] else: _marker = [f" {obj.marker[0]} "] else: _marker = [f" "] if i < 9: _num = f"\n {i+1} " elif i == 9: _num = "\n 0 " elif i == 10: _num = "\n - " elif i == 11: _num = "\n = " if obj and obj is self.player.inventory.selection: _marker += [("line", _num)] else: _marker += [("top", _num)] btn = urwid.Text(_marker, align="center") inv_btns.append((5, urwid.LineBox(btn))) if self.mind.screen_size != (80, 24): inv_btns.append(urwid.Text("\nSET TERMINAL\nTO 80X24", align="center")) self.contents["footer"] = (SelectableColumns(inv_btns, dividechars=0), None) self.footer_content_size = _size def on_update(self): self.update_header() if self.footer_content_size != len(self.player.inventory.all): self.update_footer() if self.mind.screen_size != (80, 24): self.update_footer() self.map.on_update() if self.menu_view: self.menu.on_update() class MapFrame(UiFrame): class MenuFrame(UiFrame): class InventoryFrame(UiFrame): class StatusFrame(UiFrame): class EquipmentFrame(UiFrame): class HelpFrame(UiFrame): class SelectableListBox(urwid.ListBox): class SelectableColumns(urwid.Columns): class FrameColumns(urwid.Columns): class ButtonLabel(urwid.SelectableIcon): def set_text(self, label): ''' set_text is invoked by Button.set_label ''' self.__super.set_text(label) self._cursor_position = len(label) + 1 class MyButton(urwid.Button): ''' - override __init__ to use our ButtonLabel instead of urwid.SelectableIcon - make button_left and button_right plain strings and 
variable width - any string, including an empty string, can be set and displayed - otherwise, we leave Button behaviour unchanged ''' button_left = "[" button_right = "]" # @property # def disabled(self): # return self._disabled # @disabled.setter # def disabled(self, value): # if self._disabled == value: # return # if self.disabled: # urwid.AttrMap(self, "disabled") # else: # urwid.AttrMap(self, None, "line") def attr_button(label, cmd=None, attr_map=None, focus_map = "line", align = "center", user_args = None, borders=True, disabled=False): btn = create_button(label, cmd=cmd, align = align, user_args = user_args, borders=borders, disabled=disabled) return urwid.AttrMap(btn, attr_map, focus_map=focus_map) def create_button(label, cmd=None, align = "center", user_args = None, borders=True, disabled=False): btn = MyButton(label, borders=borders, disabled=disabled) btn._label.align = align if cmd: if user_args: urwid.connect_signal(btn, "click", cmd, user_args = user_args) else: urwid.connect_signal(btn, "click", cmd) return btn
37.90413
210
0.575742
76e3aa393f7a0908df3e197db3a2c2ed201ee19d
4,851
py
Python
lale/lib/autogen/linear_regression.py
gbdrt/lale
291f824a6b96f088e787979ca768f50d7758424e
[ "Apache-2.0" ]
null
null
null
lale/lib/autogen/linear_regression.py
gbdrt/lale
291f824a6b96f088e787979ca768f50d7758424e
[ "Apache-2.0" ]
null
null
null
lale/lib/autogen/linear_regression.py
gbdrt/lale
291f824a6b96f088e787979ca768f50d7758424e
[ "Apache-2.0" ]
null
null
null
from numpy import inf, nan from sklearn.linear_model import LinearRegression as Op from lale.docstrings import set_docstrings from lale.operators import make_operator _hyperparams_schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "inherited docstring for LinearRegression Ordinary least squares Linear Regression.", "allOf": [ { "type": "object", "required": ["fit_intercept", "normalize", "copy_X", "n_jobs"], "relevantToOptimizer": ["fit_intercept", "normalize", "copy_X"], "additionalProperties": False, "properties": { "fit_intercept": { "type": "boolean", "default": True, "description": "whether to calculate the intercept for this model", }, "normalize": { "type": "boolean", "default": False, "description": "This parameter is ignored when ``fit_intercept`` is set to False", }, "copy_X": { "type": "boolean", "default": True, "description": "If True, X will be copied; else, it may be overwritten.", }, "n_jobs": { "anyOf": [{"type": "integer"}, {"enum": [None]}], "default": 1, "description": "The number of jobs to use for the computation", }, }, }, { "XXX TODO XXX": "Parameter: n_jobs > only provide speedup for n_targets > 1 and sufficient large problems" }, ], } _input_fit_schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "Fit linear model.", "type": "object", "required": ["X", "y"], "properties": { "X": { "anyOf": [ { "type": "array", "items": {"laleType": "Any", "XXX TODO XXX": "item type"}, "XXX TODO XXX": "array-like or sparse matrix, shape (n_samples, n_features)", }, { "type": "array", "items": {"type": "array", "items": {"type": "number"}}, }, ], "description": "Training data", }, "y": { "type": "array", "items": {"type": "array", "items": {"type": "number"}}, "description": "Target values", }, "sample_weight": { "type": "array", "items": {"type": "number"}, "description": "Individual weights for each sample ", }, }, } _input_predict_schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "Predict using the linear model", "type": "object", "required": ["X"], "properties": { "X": { "anyOf": [ { "type": "array", "items": {"laleType": "Any", "XXX TODO XXX": "item type"}, "XXX TODO XXX": "array_like or sparse matrix, shape (n_samples, n_features)", }, { "type": "array", "items": {"type": "array", "items": {"type": "number"}}, }, ], "description": "Samples.", } }, } _output_predict_schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "Returns predicted values.", "type": "array", "items": {"type": "number"}, } _combined_schemas = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "Combined schema for expected data and hyperparameters.", "documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.linear_model.LinearRegression#sklearn-linear_model-linearregression", "import_from": "sklearn.linear_model", "type": "object", "tags": {"pre": [], "op": ["estimator"], "post": []}, "properties": { "hyperparams": _hyperparams_schema, "input_fit": _input_fit_schema, "input_predict": _input_predict_schema, "output_predict": _output_predict_schema, }, } set_docstrings(LinearRegressionImpl, _combined_schemas) LinearRegression = make_operator(LinearRegressionImpl, _combined_schemas)
35.408759
151
0.508555
76e51515f0db0f6532d593373bce97eb6eda37bb
12,689
py
Python
Models.py
jmj23/Kaggle-Pneumothorax
96153af30468c5bcb49875dd374ac44ed1b4e2fb
[ "MIT" ]
null
null
null
Models.py
jmj23/Kaggle-Pneumothorax
96153af30468c5bcb49875dd374ac44ed1b4e2fb
[ "MIT" ]
null
null
null
Models.py
jmj23/Kaggle-Pneumothorax
96153af30468c5bcb49875dd374ac44ed1b4e2fb
[ "MIT" ]
2
2019-07-12T15:03:41.000Z
2019-08-07T21:24:49.000Z
import numpy as np from keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import Model # Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): """Creates a Block CED model for segmentation problems Args: input shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is multipled linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly """ use_bn = True # check for input shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible" assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input" # input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list = [] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 = 
BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): """Creates a Block model for pretraining on classification task Args: input shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is multipled linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An unintialized Keras model Example useage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly """ use_bn = True # check for input shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible" assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, "Too small of input for this many blocks. 
Use fewer blocks or larger input" # input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list = [] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_skip_{}'.format(rr))(x) # average pooling x = GlobalAveragePooling2D()(x) # classifier lay_out = Dense(1, activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out)
41.877888
99
0.585625
76e58be1ebfa1f5a2978f0298b22ab49d27824a1
386
py
Python
initdb.py
dasmerlon/flunky-bot
19dff5a74bee6685e806f98c3f877216ef454a5d
[ "MIT" ]
null
null
null
initdb.py
dasmerlon/flunky-bot
19dff5a74bee6685e806f98c3f877216ef454a5d
[ "MIT" ]
null
null
null
initdb.py
dasmerlon/flunky-bot
19dff5a74bee6685e806f98c3f877216ef454a5d
[ "MIT" ]
null
null
null
#!/bin/env python
"""Drop and create a new database with schema."""
from sqlalchemy_utils.functions import database_exists, create_database, drop_database

from flunkybot.db import engine, base
from flunkybot.models import *  # noqa

db_url = engine.url
if database_exists(db_url):
    drop_database(db_url)
create_database(db_url)

base.metadata.drop_all()
base.metadata.create_all()
22.705882
86
0.790155
76e62dfaead6e340b719c28d88044ea601c31718
1,309
py
Python
setup.py
awesome-archive/webspider
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
[ "MIT" ]
null
null
null
setup.py
awesome-archive/webspider
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
[ "MIT" ]
null
null
null
setup.py
awesome-archive/webspider
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

from setuptools import find_packages, setup

from app import __version__

# get the dependencies and installs
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
    all_requirements = f.read().split('\n')

setup(
    name='webspider',
    version=__version__,
    license='MIT',
    author='heguozhu',
    author_email='[email protected]',
    description='lagou.com spider',
    url='[email protected]:GuozhuHe/webspider.git',
    packages=find_packages(exclude=['tests']),
    package_data={'webspider': ['README.md']},
    zip_safe=False,
    install_requires=all_requirements,
    entry_points={
        'console_scripts': [
            'web = app.web_app:main',
            'production_web = app.quickly_cmd:run_web_app_by_gunicorn',
            'crawl_lagou_data = app.tasks:crawl_lagou_data',
            'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count',
            'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker',
            'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker',
            'celery_beat = app.quickly_cmd:run_celery_beat',
            'celery_flower = app.quickly_cmd.py:run_celery_flower',
        ],
    }
)
34.447368
86
0.6822
76e72292730408078c92e31d3a0592b902469f3c
6,038
py
Python
Doc/conf.py
python-doc-tw/cpython-tw
9b83e9ffbdd2f3fc56de8dcdc8c4651feeb5a281
[ "PSF-2.0" ]
null
null
null
Doc/conf.py
python-doc-tw/cpython-tw
9b83e9ffbdd2f3fc56de8dcdc8c4651feeb5a281
[ "PSF-2.0" ]
null
null
null
Doc/conf.py
python-doc-tw/cpython-tw
9b83e9ffbdd2f3fc56de8dcdc8c4651feeb5a281
[ "PSF-2.0" ]
null
null
null
# # Python documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). import sys, os, time sys.path.append(os.path.abspath('tools/extensions')) # General configuration # --------------------- extensions = ['sphinx.ext.coverage', 'sphinx.ext.doctest', 'pyspecific', 'c_annotations'] # General substitutions. project = 'Python' copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y') # We look for the Include/patchlevel.h file in the current Python source tree # and replace the values accordingly. import patchlevel version, release = patchlevel.get_version_info() # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # By default, highlight as Python 3. highlight_language = 'python3' # Require Sphinx 1.2 for build. needs_sphinx = '1.2' # Ignore any .rst files in the venv/ directory. exclude_patterns = ['venv/*'] # Options for HTML output # ----------------------- # Use our custom theme. html_theme = 'pydoctheme' html_theme_path = ['tools'] html_theme_options = {'collapsiblesidebar': True} # Short title used e.g. for <title> HTML tags. html_short_title = '%s Documentation' % release # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # Path to find HTML templates. templates_path = ['tools/templates'] # Custom sidebar templates, filenames relative to this file. html_sidebars = { 'index': 'indexsidebar.html', } # Additional templates that should be rendered to pages. html_additional_pages = { 'download': 'download.html', 'index': 'indexcontent.html', } # Output an OpenSearch description file. html_use_opensearch = 'https://docs.python.org/' + version # Additional static files. html_static_path = ['tools/static'] # Output file base name for HTML help builder. htmlhelp_basename = 'python' + release.replace('.', '') # Split the index html_split_index = True # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). latex_paper_size = 'a4' # The font size ('10pt', '11pt' or '12pt'). latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). _stdauthor = r'Guido van Rossum\\and the Python development team' latex_documents = [ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'), ('distributing/index', 'distributing.tex', 'Distributing Python Modules', _stdauthor, 'manual'), ('extending/index', 'extending.tex', 'Extending and Embedding Python', _stdauthor, 'manual'), ('installing/index', 'installing.tex', 'Installing Python Modules', _stdauthor, 'manual'), ('library/index', 'library.tex', 'The Python Library Reference', _stdauthor, 'manual'), ('reference/index', 'reference.tex', 'The Python Language Reference', _stdauthor, 'manual'), ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'), ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'), ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'), ('whatsnew/' + version, 'whatsnew.tex', 'What\'s New in Python', 'A. M. 
Kuchling', 'howto'), ] # Collect all HOWTOs individually latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto') for fn in os.listdir('howto') if fn.endswith('.rst') and fn != 'index.rst') # Additional stuff for the LaTeX preamble. latex_preamble = r''' \authoraddress{ \strong{Python Software Foundation}\\ Email: \email{[email protected]} } \let\Verbatim=\OriginalVerbatim \let\endVerbatim=\endOriginalVerbatim ''' # Documents to append as an appendix to all manuals. latex_appendices = ['glossary', 'about', 'license', 'copyright'] # Get LaTeX to handle Unicode correctly latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''} # Options for Epub output # ----------------------- epub_author = 'Python Documentation Authors' epub_publisher = 'Python Software Foundation' # Options for the coverage checker # -------------------------------- # The coverage checker will ignore all modules/functions/classes whose names # match any of the following regexes (using re.match). coverage_ignore_modules = [ r'[T|t][k|K]', r'Tix', r'distutils.*', ] coverage_ignore_functions = [ 'test($|_)', ] coverage_ignore_classes = [ ] # Glob patterns for C source files for C API coverage, relative to this directory. coverage_c_path = [ '../Include/*.h', ] # Regexes to find C items in the source files. coverage_c_regexes = { 'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'), 'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'), 'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'), } # The coverage checker will ignore all C items whose names match these regexes # (using re.match) -- the keys must be the same as in coverage_c_regexes. coverage_ignore_c_items = { # 'cfunction': [...] } # Options for the link checker # ---------------------------- # Ignore certain URLs. linkcheck_ignore = [r'https://bugs.python.org/(issue)?\d+', # Ignore PEPs for now, they all have permanent redirects. r'http://www.python.org/dev/peps/pep-\d+'] # Options for extensions # ---------------------- # Relative filename of the reference count data file. refcount_file = 'data/refcounts.dat' # Translation # ----------- gettext_compact = False locale_dirs = ["locale"]
29.598039
82
0.661809
76e8aa5b3dcd6d5941acd4ac1423725bbe5688e5
2,178
py
Python
basic_stats.py/basic_stats.py
RahmB/basic_stats
b286fc84faa6dab17aa8d1e04d85fbf29a41ee12
[ "MIT" ]
null
null
null
basic_stats.py/basic_stats.py
RahmB/basic_stats
b286fc84faa6dab17aa8d1e04d85fbf29a41ee12
[ "MIT" ]
null
null
null
basic_stats.py/basic_stats.py
RahmB/basic_stats
b286fc84faa6dab17aa8d1e04d85fbf29a41ee12
[ "MIT" ]
null
null
null
# Import the matplotlib module here. No other modules should be used.

# Import plotting library
import matplotlib.pyplot as plt

#import....
from os import *

# Import Numpy
import numpy as np
22
77
0.565657
76e91ea24b8b713b4825e3c31ae941d3409f7123
4,987
py
Python
src/catkin_pkg/cli/tag_changelog.py
delftrobotics-forks/catkin_pkg
122eae0971f13a6080b72af6bb0eb52656c00bea
[ "BSD-3-Clause" ]
2
2018-12-11T16:35:20.000Z
2019-01-23T16:42:17.000Z
usr/lib/python2.7/dist-packages/catkin_pkg/cli/tag_changelog.py
Roboy/roboy_managing_node_fpga
64ffe5aec2f2c98a051bb1a881849c195b8d052c
[ "BSD-3-Clause" ]
1
2020-08-25T11:24:44.000Z
2020-09-22T14:01:26.000Z
src/catkin_pkg/cli/tag_changelog.py
plusone-robotics/catkin_pkg
9d68332b97db07f77a8b56bb5afaf89ec2536dfa
[ "BSD-3-Clause" ]
4
2019-04-30T23:34:51.000Z
2021-07-04T07:55:34.000Z
"""This script renames the forthcoming section in changelog files with the upcoming version and the current date""" from __future__ import print_function import argparse import datetime import docutils.core import os import re import sys from catkin_pkg.changelog import CHANGELOG_FILENAME, get_changelog_from_path from catkin_pkg.changelog_generator import FORTHCOMING_LABEL from catkin_pkg.package_version import bump_version from catkin_pkg.packages import find_packages, verify_equal_package_versions
43.745614
166
0.687989
76eaa983d4b2d01d9a4e9daae5b69684ff9a0e05
1,199
py
Python
tests/optims/distributed_adamw_test.py
AswinRetnakumar/Machina
6519935ca4553192ac99fc1c7c1e7cab9dd72693
[ "MIT" ]
302
2019-03-13T10:21:29.000Z
2022-03-25T10:01:46.000Z
tests/optims/distributed_adamw_test.py
AswinRetnakumar/Machina
6519935ca4553192ac99fc1c7c1e7cab9dd72693
[ "MIT" ]
50
2019-03-13T09:45:00.000Z
2021-12-23T18:32:00.000Z
tests/optims/distributed_adamw_test.py
AswinRetnakumar/Machina
6519935ca4553192ac99fc1c7c1e7cab9dd72693
[ "MIT" ]
55
2019-03-17T01:59:57.000Z
2022-03-28T01:13:40.000Z
import os
import unittest

import torch
import torch.distributed as dist
from torch.multiprocessing import Process
import torch.nn as nn

from machina.optims import DistributedAdamW
26.065217
50
0.559633
76eba1361f488fe5c5dadbe9595a11b254abd926
870
py
Python
rsqueakvm/model/__init__.py
shiplift/RSqueakOnABoat
ac449758ddb7aef1721e65a13171547761dd6e39
[ "BSD-3-Clause" ]
44
2015-02-08T09:38:46.000Z
2017-11-15T01:19:40.000Z
rsqueakvm/model/__init__.py
shiplift/RSqueakOnABoat
ac449758ddb7aef1721e65a13171547761dd6e39
[ "BSD-3-Clause" ]
112
2015-02-08T09:34:40.000Z
2017-04-10T19:06:30.000Z
rsqueakvm/model/__init__.py
shiplift/RSqueakOnABoat
ac449758ddb7aef1721e65a13171547761dd6e39
[ "BSD-3-Clause" ]
7
2015-04-08T11:49:10.000Z
2017-01-19T06:36:27.000Z
""" Squeak model. W_Object W_SmallInteger W_MutableSmallInteger W_AbstractObjectWithIdentityHash W_AbstractFloat W_Float W_MutableFloat W_Character W_PointersObject W_AbstractObjectWithClassReference W_LargeInteger W_LargeIntegerWord W_LargeIntegerBig W_BytesObject W_WordsObject W_CompiledMethod W_SpurCompiledMethod W_PreSpurCompiledMethod """ from rsqueakvm.model.base import * from rsqueakvm.model.character import * from rsqueakvm.model.compiled_methods import * # from rsqueakvm.model.display import * from rsqueakvm.model.numeric import * from rsqueakvm.model.pointers import * from rsqueakvm.model.variable import *
28.064516
46
0.625287
76ebc2ee4ceeeeacb1f5e2ff455580aa77112974
6,352
py
Python
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
nikola3794/edge-evaluation-PASCAL-MT-tmp
d3bc7164608a20eb6351c1d41219213927ae6239
[ "MIT" ]
null
null
null
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
nikola3794/edge-evaluation-PASCAL-MT-tmp
d3bc7164608a20eb6351c1d41219213927ae6239
[ "MIT" ]
null
null
null
Multi-Task-Learning-PyTorch-master/losses/loss_functions.py
nikola3794/edge-evaluation-PASCAL-MT-tmp
d3bc7164608a20eb6351c1d41219213927ae6239
[ "MIT" ]
null
null
null
# This code is referenced from
# https://github.com/facebookresearch/astmt/
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# License: Attribution-NonCommercial 4.0 International

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
import numpy as np
32.080808
121
0.62012
76ebcd294c425806f2a19ba5ab050dfad80e8987
826
py
Python
trabalho-numerico/tridimensional.py
heissonwillen/tcm
71da46489f12e64b50436b17447721cb8f7eaf09
[ "MIT" ]
null
null
null
trabalho-numerico/tridimensional.py
heissonwillen/tcm
71da46489f12e64b50436b17447721cb8f7eaf09
[ "MIT" ]
null
null
null
trabalho-numerico/tridimensional.py
heissonwillen/tcm
71da46489f12e64b50436b17447721cb8f7eaf09
[ "MIT" ]
null
null
null
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os

import contorno
from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X

z_temp = contorno.p_3
TAMANHO_BARRA = 2

x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)
y = np.linspace(0.0, DELTA_T, PASSOS+1)
z = []

for k in range(PASSOS+1):
    z_k = np.copy(z_temp)
    z.append(z_k)
    for i in range(1, INTERVALOS):
        z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])

z = np.asarray(z)
x, y = np.meshgrid(x, y)

fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('T(x,t)')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
22.944444
82
0.692494
76edfc1706c920930c1bc7aab823c6e785689aff
1,406
py
Python
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py
zyeak/leetcode
5d7bf16bd755224223c71e8e6df81c1ff49daadc
[ "MIT" ]
null
null
null
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py
zyeak/leetcode
5d7bf16bd755224223c71e8e6df81c1ff49daadc
[ "MIT" ]
null
null
null
leetcode/0006_ZigZag_Conversion/zigzag_conversion.py
zyeak/leetcode
5d7bf16bd755224223c71e8e6df81c1ff49daadc
[ "MIT" ]
null
null
null
# solution 1:

# Solution 2

if __name__ == '__main__':
    # begin
    s = Solution()
    print(s.convert("PAYPALISHIRING", 3))
31.244444
92
0.516358
76ef2321c51f2dff2461f9538c87721e5bf560d2
2,013
py
Python
FakeNewsClassifierWithLSTM.py
pratikasarkar/nlp
275c80ab10f6dc4b4553bbcc5e5c8a4d00ff9fea
[ "Unlicense" ]
null
null
null
FakeNewsClassifierWithLSTM.py
pratikasarkar/nlp
275c80ab10f6dc4b4553bbcc5e5c8a4d00ff9fea
[ "Unlicense" ]
null
null
null
FakeNewsClassifierWithLSTM.py
pratikasarkar/nlp
275c80ab10f6dc4b4553bbcc5e5c8a4d00ff9fea
[ "Unlicense" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 13:42:45 2021

@author: ASUS
"""

import pandas as pd

df = pd.read_csv(r'D:\nlp\fake-news-data\train.csv')
df = df.dropna()
X = df.drop('label',axis = 1)
y = df['label']

import tensorflow as tf
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import one_hot

# Vocabulary size
voc_size = 5000

# One Hot Representation
messages = X.copy()
messages.reset_index(inplace = True)

import nltk
import re
from nltk.corpus import stopwords

# Dataset Preprocessing
from nltk.stem import PorterStemmer
ps = PorterStemmer()
corpus = []
for i in range(len(messages)):
    print(i)
    review = re.sub('[^a-zA-Z]',' ',messages['title'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if word not in stopwords.words('english')]
    review = " ".join(review)
    corpus.append(review)

onehot_repr = [one_hot(words,voc_size) for words in corpus]

sent_len = 20
embedded_doc = pad_sequences(onehot_repr,maxlen = sent_len,padding = 'pre')

# Creating the model
embedding_vector_features = 40
model = Sequential()
model.add(Embedding(voc_size,embedding_vector_features,input_length=sent_len))
model.add(LSTM(100))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
model.summary()

import numpy as np
X_final = np.array(embedded_doc)
y_final = np.array(y)

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_final,y_final,test_size = 0.33,random_state = 42)

model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=10,batch_size=64)

y_pred = model.predict_classes(X_test)

from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test,y_pred)
acc = accuracy_score(y_test,y_pred)
27.958333
103
0.752608
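The record above trains the LSTM on padded one-hot title sequences but stops after computing the confusion matrix. Scoring a new headline requires repeating the same preprocessing; the helper below is an assumption built only from the steps shown (stemming, one_hot with voc_size=5000, pre-padding to sent_len=20) and is not code from the repository.

# Hypothetical inference helper mirroring the preprocessing used for training above.
def predict_headline(title, model, ps, voc_size=5000, sent_len=20):
    review = re.sub('[^a-zA-Z]', ' ', title).lower().split()
    review = " ".join(ps.stem(w) for w in review if w not in stopwords.words('english'))
    seq = pad_sequences([one_hot(review, voc_size)], maxlen=sent_len, padding='pre')
    # Dense(1, sigmoid) output: probability that the headline belongs to the "fake" class.
    return float(model.predict(seq)[0][0])

# Example usage: predict_headline("Some breaking news headline", model, ps)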
76f0f94143a86c5bd1bdfebcc7fe3a026073720d
860
py
Python
SVM/SVM_12_Quiz.py
rohit517/Intro-to-machine-learning-Udacity
d0b2cc6cac1cb3408b274225cecd4afcea4ee30f
[ "MIT" ]
null
null
null
SVM/SVM_12_Quiz.py
rohit517/Intro-to-machine-learning-Udacity
d0b2cc6cac1cb3408b274225cecd4afcea4ee30f
[ "MIT" ]
null
null
null
SVM/SVM_12_Quiz.py
rohit517/Intro-to-machine-learning-Udacity
d0b2cc6cac1cb3408b274225cecd4afcea4ee30f
[ "MIT" ]
null
null
null
import sys
from class_vis import prettyPicture
from prep_terrain_data import makeTerrainData

import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl

features_train, labels_train, features_test, labels_test = makeTerrainData()

########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
clf = SVC(kernel="linear")

#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train,labels_train)

pred = clf.predict(features_test)

#### store your predictions in a list named pred

from sklearn.metrics import accuracy_score
acc = accuracy_score(pred, labels_test)
25.294118
77
0.696512
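The quiz code above fixes kernel="linear". When exploring how the decision boundary changes, it is common to sweep the kernel and the C/gamma parameters; the loop below is only an illustrative sketch that reuses the variable names from the record and is not part of the course starter code.

# Illustrative sweep over a few SVC configurations (variable names reused from above).
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

for params in ({"kernel": "linear"}, {"kernel": "rbf", "C": 10.0, "gamma": 1.0}):
    clf = SVC(**params)
    clf.fit(features_train, labels_train)
    pred = clf.predict(features_test)
    print(params, accuracy_score(pred, labels_test))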
76f17efadc147bee33131952c1b99b7ec42d46c2
1,890
py
Python
tests/test_auto_scan_logsigmoid.py
yeliang2258/Paddle2ONNX
5eeef77f2f90d1e2a45dacf6eb1cc5f35f6224a4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/test_auto_scan_logsigmoid.py
yeliang2258/Paddle2ONNX
5eeef77f2f90d1e2a45dacf6eb1cc5f35f6224a4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/test_auto_scan_logsigmoid.py
yeliang2258/Paddle2ONNX
5eeef77f2f90d1e2a45dacf6eb1cc5f35f6224a4
[ "ECL-2.0", "Apache-2.0" ]
1
2022-01-29T04:35:49.000Z
2022-01-29T04:35:49.000Z
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from auto_scan_test import OPConvertAutoScanTest, BaseNet
from hypothesis import reproduce_failure
import hypothesis.strategies as st
import numpy as np
import unittest
import paddle


if __name__ == "__main__":
    unittest.main()
26.619718
74
0.635979
76f217cfd33281d5ca8af791540db7576b28df64
4,408
py
Python
oasislmf/utils/concurrency.py
bbetov-corelogic/OasisLMF
fcb9a595ec6eb30c2ed3b9b67152c2f27fc0082b
[ "BSD-3-Clause" ]
null
null
null
oasislmf/utils/concurrency.py
bbetov-corelogic/OasisLMF
fcb9a595ec6eb30c2ed3b9b67152c2f27fc0082b
[ "BSD-3-Clause" ]
null
null
null
oasislmf/utils/concurrency.py
bbetov-corelogic/OasisLMF
fcb9a595ec6eb30c2ed3b9b67152c2f27fc0082b
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()

try:
    from queue import Queue, Empty
except ImportError:
    from Queue import Queue, Empty

import sys
import types

import billiard

from signal import (
    signal,
    SIGINT,
)
from threading import (
    Event,
    Thread,
)

__all__ = [
    'multiprocess',
    'multithread',
    'SignalHandler',
    'Task'
]


def multithread(tasks, pool_size=10):
    """
    Executes several tasks concurrently via ``threading`` threads, puts the
    results into a queue, and generates these back to the caller.
    """
    task_q = Queue()

    num_tasks = 0
    for task in tasks:
        task_q.put(task)
        num_tasks += 1

    result_q = Queue()
    stopper = Event()

    threads = tuple(Thread(target=run, args=(i, task_q, result_q, stopper,)) for i in range(pool_size))

    handler = SignalHandler(stopper, threads)
    signal(SIGINT, handler)

    for thread in threads:
        thread.start()

    task_q.join()

    while not result_q.empty():
        key, result = result_q.get_nowait()
        yield key, result
23.078534
103
0.595735
76f2637d428beecc1c55ba4761f8ecce6c4c4884
26,267
py
Python
runtime/python/Lib/site-packages/isort/output.py
hwaipy/InteractionFreeNode
88642b68430f57b028fd0f276a5709f89279e30d
[ "MIT" ]
4
2021-10-20T12:39:09.000Z
2022-02-26T15:02:08.000Z
runtime/python/Lib/site-packages/isort/output.py
hwaipy/InteractionFreeNode
88642b68430f57b028fd0f276a5709f89279e30d
[ "MIT" ]
20
2021-05-03T18:02:23.000Z
2022-03-12T12:01:04.000Z
runtime/python/Lib/site-packages/isort/output.py
hwaipy/InteractionFreeNode
88642b68430f57b028fd0f276a5709f89279e30d
[ "MIT" ]
3
2021-08-28T14:22:36.000Z
2021-10-06T18:59:41.000Z
import copy import itertools from functools import partial from typing import Any, Iterable, List, Optional, Set, Tuple, Type from isort.format import format_simplified from . import parse, sorting, wrap from .comments import add_to_line as with_comments from .identify import STATEMENT_DECLARATIONS from .settings import DEFAULT_CONFIG, Config def sorted_imports( parsed: parse.ParsedContent, config: Config = DEFAULT_CONFIG, extension: str = "py", import_type: str = "import", ) -> str: """Adds the imports back to the file. (at the index of the first import) sorted alphabetically and split between groups """ if parsed.import_index == -1: return _output_as_string(parsed.lines_without_imports, parsed.line_separator) formatted_output: List[str] = parsed.lines_without_imports.copy() remove_imports = [format_simplified(removal) for removal in config.remove_imports] sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate) if config.no_sections: parsed.imports["no_sections"] = {"straight": {}, "from": {}} base_sections: Tuple[str, ...] = () for section in sections: if section == "FUTURE": base_sections = ("FUTURE",) continue parsed.imports["no_sections"]["straight"].update( parsed.imports[section].get("straight", {}) ) parsed.imports["no_sections"]["from"].update(parsed.imports[section].get("from", {})) sections = base_sections + ("no_sections",) output: List[str] = [] seen_headings: Set[str] = set() pending_lines_before = False for section in sections: straight_modules = parsed.imports[section]["straight"] if not config.only_sections: straight_modules = sorting.sort( config, straight_modules, key=lambda key: sorting.module_key( key, config, section_name=section, straight_import=True ), reverse=config.reverse_sort, ) from_modules = parsed.imports[section]["from"] if not config.only_sections: from_modules = sorting.sort( config, from_modules, key=lambda key: sorting.module_key(key, config, section_name=section), reverse=config.reverse_sort, ) if config.star_first: star_modules = [] other_modules = [] for module in from_modules: if "*" in parsed.imports[section]["from"][module]: star_modules.append(module) else: other_modules.append(module) from_modules = star_modules + other_modules straight_imports = _with_straight_imports( parsed, config, straight_modules, section, remove_imports, import_type ) from_imports = _with_from_imports( parsed, config, from_modules, section, remove_imports, import_type ) lines_between = [""] * ( config.lines_between_types if from_modules and straight_modules else 0 ) if config.from_first: section_output = from_imports + lines_between + straight_imports else: section_output = straight_imports + lines_between + from_imports if config.force_sort_within_sections: # collapse comments comments_above = [] new_section_output: List[str] = [] for line in section_output: if not line: continue if line.startswith("#"): comments_above.append(line) elif comments_above: new_section_output.append(_LineWithComments(line, comments_above)) comments_above = [] else: new_section_output.append(line) # only_sections options is not imposed if force_sort_within_sections is True new_section_output = sorting.sort( config, new_section_output, key=partial(sorting.section_key, config=config), reverse=config.reverse_sort, ) # uncollapse comments section_output = [] for line in new_section_output: comments = getattr(line, "comments", ()) if comments: section_output.extend(comments) section_output.append(str(line)) section_name = section no_lines_before = section_name in 
config.no_lines_before if section_output: if section_name in parsed.place_imports: parsed.place_imports[section_name] = section_output continue section_title = config.import_headings.get(section_name.lower(), "") if section_title and section_title not in seen_headings: if config.dedup_headings: seen_headings.add(section_title) section_comment = f"# {section_title}" if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch section_output.insert(0, section_comment) if pending_lines_before or not no_lines_before: output += [""] * config.lines_between_sections output += section_output pending_lines_before = False else: pending_lines_before = pending_lines_before or not no_lines_before if config.ensure_newline_before_comments: output = _ensure_newline_before_comment(output) while output and output[-1].strip() == "": output.pop() # pragma: no cover while output and output[0].strip() == "": output.pop(0) if config.formatting_function: output = config.formatting_function( parsed.line_separator.join(output), extension, config ).splitlines() output_at = 0 if parsed.import_index < parsed.original_line_count: output_at = parsed.import_index formatted_output[output_at:0] = output if output: imports_tail = output_at + len(output) while [ character.strip() for character in formatted_output[imports_tail : imports_tail + 1] ] == [""]: formatted_output.pop(imports_tail) if len(formatted_output) > imports_tail: next_construct = "" tail = formatted_output[imports_tail:] for index, line in enumerate(tail): # pragma: no branch should_skip, in_quote, *_ = parse.skip_line( line, in_quote="", index=len(formatted_output), section_comments=config.section_comments, needs_import=False, ) if not should_skip and line.strip(): if ( line.strip().startswith("#") and len(tail) > (index + 1) and tail[index + 1].strip() ): continue next_construct = line break if in_quote: # pragma: no branch next_construct = line break if config.lines_after_imports != -1: formatted_output[imports_tail:0] = [ "" for line in range(config.lines_after_imports) ] elif extension != "pyi" and next_construct.startswith(STATEMENT_DECLARATIONS): formatted_output[imports_tail:0] = ["", ""] else: formatted_output[imports_tail:0] = [""] if parsed.place_imports: new_out_lines = [] for index, line in enumerate(formatted_output): new_out_lines.append(line) if line in parsed.import_placements: new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]]) if ( len(formatted_output) <= (index + 1) or formatted_output[index + 1].strip() != "" ): new_out_lines.append("") formatted_output = new_out_lines return _output_as_string(formatted_output, parsed.line_separator)
40.91433
100
0.516199
76f317598810c56fd2ed005b83b2ae2293df83ae
24,928
py
Python
vixen/project.py
amoeba/vixen
a2b450fa918e23da644b1818807577139a0ae6e8
[ "BSD-3-Clause" ]
10
2017-09-19T11:00:11.000Z
2021-08-12T08:56:15.000Z
vixen/project.py
amoeba/vixen
a2b450fa918e23da644b1818807577139a0ae6e8
[ "BSD-3-Clause" ]
22
2018-01-14T11:22:14.000Z
2020-04-08T00:01:29.000Z
vixen/project.py
amoeba/vixen
a2b450fa918e23da644b1818807577139a0ae6e8
[ "BSD-3-Clause" ]
3
2018-01-24T16:55:01.000Z
2019-06-17T04:26:33.000Z
import datetime import io import json_tricks import logging import os from os.path import (abspath, basename, dirname, exists, expanduser, join, realpath, relpath, splitext) import re import shutil import sys from traits.api import (Any, Dict, Enum, HasTraits, Instance, List, Long, Str) from whoosh import fields, qparser, query from whoosh.util.times import datetime_to_long, long_to_datetime from .common import get_project_dir from .media import Media, MediaData, get_media_data from .directory import Directory from . import processor logger = logging.getLogger(__name__) if sys.version_info[0] > 2: unicode = str string_types = (str,) import csv else: string_types = (basestring,) import backports.csv as csv INT = fields.NUMERIC(numtype=int) FLOAT = fields.NUMERIC(numtype=float) COMMON_TAGS = dict( file_name='string', path='string', relpath='string', ctime='string', mtime='string', size='int', type='string' ) def _search_media(expr, m_key, get_tag): """Given search expression, index to media, and a getter to get the attribute check if the media matches expression. """ if expr.is_leaf(): if isinstance(expr, query.Term): attr = expr.fieldname return _check_value(get_tag(m_key, attr), expr.text) elif isinstance(expr, query.Phrase): attr = expr.fieldname text = " ".join(expr.words) return _check_value(get_tag(m_key, attr), text) elif isinstance(expr, query.DateRange): if expr.fieldname == 'ctime': value = get_tag(m_key, 'ctime_') elif expr.fieldname == 'mtime': value = get_tag(m_key, 'mtime_') return _check_date_range(value, expr) elif isinstance(expr, query.NumericRange): attr = expr.fieldname return _check_range(get_tag(m_key, attr), expr) else: print("Unsupported term: %r" % expr) return False else: if isinstance(expr, query.And): result = True for child in expr.children(): result &= _search_media(child, m_key, get_tag) if not result: break return result elif isinstance(expr, query.Or): result = False for child in expr.children(): result |= _search_media(child, m_key, get_tag) if result: break return result elif isinstance(expr, query.Not): subquery = list(expr.children())[0] return not _search_media(subquery, m_key, get_tag) else: print("Unsupported term: %r" % expr) return False
33.193076
81
0.565709
76f36db1130141ba9e8823d77aa6984660a91f95
5,659
py
Python
prance/util/translator.py
elemental-lf/prance
d4bb6d2edf00ef18540a140025df8ad75d01fc16
[ "MIT" ]
null
null
null
prance/util/translator.py
elemental-lf/prance
d4bb6d2edf00ef18540a140025df8ad75d01fc16
[ "MIT" ]
null
null
null
prance/util/translator.py
elemental-lf/prance
d4bb6d2edf00ef18540a140025df8ad75d01fc16
[ "MIT" ]
null
null
null
"""This submodule contains a JSON reference translator.""" __author__ = 'tpn Tomsa' __copyright__ = 'Copyright 2021 tpn Tomsa' __license__ = 'MIT' __all__ = () import prance.util.url as _url def _reference_key(ref_url, item_path): """ Return a portion of the dereferenced URL. format - ref-url_obj-path """ return ref_url.path.split('/')[-1] + '_' + '_'.join(item_path[1:]) # Underscored to allow some time for the public API to be stabilized.
36.044586
88
0.621841
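For illustration, _reference_key above only concatenates the file part of the reference URL with the trailing components of the item path. The concrete values below are an invented example, not taken from prance's test suite; any urlsplit result works because the function only touches the .path attribute.

# Hypothetical example of what _reference_key produces.
from urllib.parse import urlsplit

ref_url = urlsplit('file:///specs/definitions.yaml')
item_path = ('definitions', 'Pet', 'properties')
print(_reference_key(ref_url, item_path))  # -> definitions.yaml_Pet_properties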
76f3d0ad2bb9ab5227cf466b13a6b3829002cac1
800
py
Python
src/tests/stream.py
LakshyaaSoni/dropSQL
da07ad3edf2d55f0521a385ad10678fc353b4b2b
[ "MIT" ]
35
2017-11-27T22:24:46.000Z
2022-01-16T23:50:01.000Z
src/tests/stream.py
LakshyaaSoni/dropSQL
da07ad3edf2d55f0521a385ad10678fc353b4b2b
[ "MIT" ]
null
null
null
src/tests/stream.py
LakshyaaSoni/dropSQL
da07ad3edf2d55f0521a385ad10678fc353b4b2b
[ "MIT" ]
2
2018-02-20T06:06:12.000Z
2021-10-16T18:30:15.000Z
from io import StringIO
from unittest import TestCase

from dropSQL.parser.streams import *
20
37
0.51125
76f498ab6421077add9a6f59a90898f50d7b050c
3,501
py
Python
tests/test_bugs.py
mmibrah2/OpenQL
8fd4ccb0fa342f777b827235748fa5f6592b0c25
[ "Apache-2.0" ]
null
null
null
tests/test_bugs.py
mmibrah2/OpenQL
8fd4ccb0fa342f777b827235748fa5f6592b0c25
[ "Apache-2.0" ]
null
null
null
tests/test_bugs.py
mmibrah2/OpenQL
8fd4ccb0fa342f777b827235748fa5f6592b0c25
[ "Apache-2.0" ]
null
null
null
import os import filecmp import unittest import numpy as np from openql import openql as ql from utils import file_compare curdir = os.path.dirname(os.path.realpath(__file__)) output_dir = os.path.join(curdir, 'test_output') # relates to https://github.com/QE-Lab/OpenQL/issues/171 # various runs of compiles were generating different results or in the best # case strange errors. So multiple (NCOMPILES) runs of compile are executed # to make sure there is no error and output generated in all these runs is same # JvS: more likely, it also had to do with the classical register allocator # depending on stuff like Python's garbage collection to free a register. # The register numbers have to be hardcoded now for that reason. def test_stateful_behavior(self): ql.set_option('optimize', 'no') ql.set_option('scheduler', 'ALAP') platform = ql.Platform("myPlatform", 'cc_light') sweep_points = [1] nqubits = 3 nregs = 3 p = ql.Program("statelessProgram", platform, nqubits, nregs) p.set_sweep_points(sweep_points) k = ql.Kernel("aKernel", platform, nqubits, nregs) k.prepz(0) k.gate('rx180', [0]) k.measure(0) rd = ql.CReg(0) rs1 = ql.CReg(1) rs2 = ql.CReg(2) k.classical(rs1, ql.Operation(3)) k.classical(rs1, ql.Operation(4)) k.classical(rd, ql.Operation(rs1, '+', rs2)) p.add_kernel(k) NCOMPILES=50 QISA_fn = os.path.join(output_dir, p.name+'_last.qasm') for i in range(NCOMPILES): p.compile() self.setUpClass() QISA_fn_i = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') os.rename(QISA_fn,QISA_fn_i) for i in range(NCOMPILES-1): QISA_fn_1 = os.path.join(output_dir, p.name+'_'+str(i)+'_last.qasm') QISA_fn_2 = os.path.join(output_dir, p.name+'_'+str(i+1)+'_last.qasm') self.assertTrue( file_compare(QISA_fn_1, QISA_fn_2)) # Unclear how this test works. # When clear, enable it again. # Now it fails, not clear how to repair, so it is disabled. # def test_empty_infinite_loop(self): # name = 'empty_infinite_loop' # in_fn = 'test_' + name + '.cq' # out_fn = 'test_output/' + name + '_out.cq' # gold_fn = 'golden/' + name + '_out.cq' # ql.initialize() # #ql.set_option('log_level', 'LOG_DEBUG') # ql.compile(in_fn) # self.assertTrue(file_compare(out_fn, gold_fn)) if __name__ == '__main__': unittest.main()
32.719626
83
0.622394
76f6512f7d0f9be2b22c77b6be1aa4a85a8c2498
1,530
py
Python
utils/setAddress.py
wedvjin/rs485-moist-sensor
90930a34d0e6ec977f6083e70cc4bd931d7453fb
[ "Apache-2.0" ]
1
2019-03-04T13:24:42.000Z
2019-03-04T13:24:42.000Z
utils/setAddress.py
wedvjin/rs485-moist-sensor
90930a34d0e6ec977f6083e70cc4bd931d7453fb
[ "Apache-2.0" ]
null
null
null
utils/setAddress.py
wedvjin/rs485-moist-sensor
90930a34d0e6ec977f6083e70cc4bd931d7453fb
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
"""Looks for sensor on the bus and changes its address to the one specified on command line"""

import argparse
import minimalmodbus
import serial
from time import sleep

parser = argparse.ArgumentParser()
parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248),
                    help='An address to set')
args = parser.parse_args()

ADDRESS1 = 1
ADDRESS2 = args.address

minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
minimalmodbus.PARITY = serial.PARITY_NONE
minimalmodbus.STOPBITS = 2
minimalmodbus.BAUDRATE = 19200
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
# sensor.debug=True

(found, i) = scanModbus()
if found:
    print('Found sensor at address: ' + str(i))
    try:
        sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
        print("writing new address: " + str(ADDRESS2))
        sensor.write_register(0, value=ADDRESS2, functioncode=6)
        sleep(0.2)
        sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2)
        print("reading address from holding register: ")
        print(sensor.read_register(0, functioncode=3))
    except:
        print("Could not change the address. Check your connections")
else:
    print('No sensor on the bus found')
27.321429
105
0.733333
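scanModbus() is called in the record above but its definition is not captured there. Purely as a hypothetical sketch of such a helper (the port name, address range, and timeout are assumptions, not values from the repository):

# Hypothetical scanModbus sketch: probe each candidate address on the bus and
# return (True, address) for the first unit that answers a read of register 0.
def scanModbus(port='/dev/ttyUSB5', first=1, last=247):
    for address in range(first, last + 1):
        try:
            sensor = minimalmodbus.Instrument(port, slaveaddress=address)
            sensor.serial.timeout = 0.1
            sensor.read_register(0, functioncode=3)
            return True, address
        except (IOError, ValueError):
            continue
    return False, None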
76f7444b8365ea513820f85545f6a315ea621999
6,577
py
Python
python/ray/tune/tests/test_tune_save_restore.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[ "Apache-2.0" ]
22
2018-05-08T05:52:34.000Z
2020-04-01T10:09:55.000Z
python/ray/tune/tests/test_tune_save_restore.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[ "Apache-2.0" ]
73
2021-09-25T07:11:39.000Z
2022-03-26T07:10:59.000Z
python/ray/tune/tests/test_tune_save_restore.py
mgelbart/ray
4cec2286572e368a4bd64aae467751a384eff62d
[ "Apache-2.0" ]
10
2018-04-27T10:50:59.000Z
2020-02-24T02:41:43.000Z
# coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest

import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.utils import validate_save_restore


if __name__ == "__main__":
    import pytest
    import sys

    sys.exit(pytest.main(["-v", __file__]))
35.360215
85
0.635092
76f7e1b302002b518c986240747a14b0f7bf282f
4,291
py
Python
src/manifest.py
silent1mezzo/lightsaber
e470be7fb84b810fe846ff0ede78d06bf69cd5e3
[ "MIT" ]
13
2020-08-12T12:04:19.000Z
2022-03-12T03:53:07.000Z
src/manifest.py
silent1mezzo/lightsaber
e470be7fb84b810fe846ff0ede78d06bf69cd5e3
[ "MIT" ]
46
2020-09-03T06:00:18.000Z
2022-03-25T10:03:53.000Z
src/manifest.py
silent1mezzo/lightsaber
e470be7fb84b810fe846ff0ede78d06bf69cd5e3
[ "MIT" ]
3
2021-08-11T19:12:37.000Z
2021-11-09T15:19:59.000Z
MANIFEST = { "hilt": { "h1": { "offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}}, "colours": { "primary": (216, 216, 216), # d8d8d8 "secondary": (141, 141, 141), # 8d8d8d "tertiary": (180, 97, 19), # b46113 }, "length": 24, "materials": "Alloy metal/Salvaged materials", }, "h2": { "offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}}, "colours": { "primary": (112, 112, 112), # 707070 "secondary": (0, 0, 0), # 000000 "tertiary": (212, 175, 55), # 000000 }, "length": 24, "materials": "Alloy metal and carbon composite", }, "h3": { "offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}}, "colours": { "primary": (157, 157, 157), # 707070 "secondary": (0, 0, 0), # 000000 "tertiary": (180, 97, 19), # b46113 }, "length": 24, "materials": "Alloy metal", }, "h4": { "offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}}, "colours": { "primary": (0, 0, 0), # 000000 "secondary": (157, 157, 157), # 9d9d9d "tertiary": (180, 97, 19), # b46113 }, "length": 13, "materials": "Alloy metal", }, "h5": { "offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}}, "colours": { "primary": (111, 111, 111), # 6f6f6f "secondary": (0, 0, 0), # 000000 "tertiary": (180, 97, 19), # b46113 }, "length": 24, "materials": "Alloy metal", }, "h6": { "offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}}, "colours": { "primary": (120, 120, 120), # 787878 "secondary": (0, 0, 0), # 000000 "tertiary": (180, 97, 19), # b46113 }, "length": 22, "materials": "Alloy metal/Salvaged materials", }, "h7": { "offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}}, "colours": { "primary": (192, 192, 192), # c0c0c0 "secondary": (255, 215, 0), # ffd700 "tertiary": (0, 0, 0), # 000000 }, "length": 22, "materials": "Alloy metal and Gold", }, "h8": { "offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}}, "colours": { "primary": (216, 216, 216), # d8d8d8 "secondary": (180, 97, 19), # b46113 "tertiary": (0, 0, 0), # 000000 }, "length": 24, "materials": "Alloy metal/Copper", }, }, "blade": { "b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"}, "b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"}, "b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"}, "b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"}, "b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"}, "b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"}, "b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"}, "b8": { "colour": "Orange", "crystal": ["Ilum crystal", "Ultima Pearl"], "type": "Sith", }, "b9": { "colour": "Black", "crystal": "Obsidian", "type": ["Jedi", "Mandalorian"], }, }, "pommel": { "p1": {"length": 5,}, "p2": {"length": 14,}, "p3": {"length": 3,}, "p4": {"length": 8,}, "p5": {"length": 5,}, "p6": {"length": 5,}, "p7": {"length": 8,}, }, # These are lightsabers for a specific Jedi or Sith. Should use their name instead of "unique_urls": {""}, }
37.313043
89
0.381496
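The MANIFEST dictionary in the record above is plain nested data. As a usage illustration only (the lookup keys come from the record, but the helper function itself is invented):

# Hypothetical helper that summarises one hilt entry from MANIFEST.
def describe_hilt(key):
    hilt = MANIFEST["hilt"][key]
    r, g, b = hilt["colours"]["primary"]
    return f'{key}: length {hilt["length"]}, primary colour #{r:02x}{g:02x}{b:02x}, {hilt["materials"]}'

print(describe_hilt("h1"))  # -> h1: length 24, primary colour #d8d8d8, Alloy metal/Salvaged materials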
76f83e45ce6ee12f802b7d17751ac89ea6359f61
21,788
py
Python
tests/gpuarray/test_basic_ops.py
canyon289/Theano-PyMC
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
[ "BSD-3-Clause" ]
1
2020-12-30T19:12:52.000Z
2020-12-30T19:12:52.000Z
tests/gpuarray/test_basic_ops.py
canyon289/Theano-PyMC
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
[ "BSD-3-Clause" ]
null
null
null
tests/gpuarray/test_basic_ops.py
canyon289/Theano-PyMC
1a9b04bfe480b758ddfa54ba49c88bee3bec419c
[ "BSD-3-Clause" ]
1
2020-08-15T17:09:10.000Z
2020-08-15T17:09:10.000Z
import numpy as np import pytest import theano import theano.tensor as tt # Don't import test classes otherwise they get tested as part of the file from tests import unittest_tools as utt from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.tensor.test_basic import ( TestAlloc, TestComparison, TestJoinAndSplit, TestReshape, ) from tests.tensor.utils import rand, safe_make_node from theano.gpuarray.basic_ops import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, ) from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise from theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from theano.tensor import TensorType from theano.tensor.basic import alloc pygpu = pytest.importorskip("pygpu") gpuarray = pygpu.gpuarray utt.seed_rng() rng = np.random.RandomState(seed=utt.fetch_seed()) TestGpuAlloc = makeTester( name="GpuAllocTester", # The +1 is there to allow the lift to the GPU. op=lambda *args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict( correct01=(rand(), np.int32(7)), # just gives a DeepCopyOp with possibly wrong results on the CPU # correct01_bcast=(rand(1), np.int32(7)), correct02=(rand(), np.int32(4), np.int32(7)), correct12=(rand(7), np.int32(4), np.int32(7)), correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) def test_gpujoin_gpualloc(): a = tt.fmatrix("a") a_val = np.asarray(np.random.rand(4, 5), dtype="float32") b = tt.fmatrix("b") b_val = np.asarray(np.random.rand(3, 5), dtype="float32") f = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu ) f_gpu = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu ) assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2 assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) def test_gpueye(): for dtype in ["float32", "int32", "float16"]: check(dtype, 3) # M != N, k = 0 check(dtype, 3, 5) check(dtype, 5, 3) # N == M, k != 0 check(dtype, 3, 3, 1) check(dtype, 3, 3, -1) # N < M, k != 0 check(dtype, 3, 5, 1) check(dtype, 3, 5, -1) # N > M, k != 0 check(dtype, 5, 3, 1) check(dtype, 5, 3, -1) # k > M, -k > N, k > M, k > N check(dtype, 5, 3, 3) check(dtype, 3, 5, 3) check(dtype, 5, 3, -3) check(dtype, 3, 5, -3) check(dtype, 5, 3, 6) check(dtype, 3, 5, -6)
32.962179
88
0.575867
76f8632c56e75a6a31f710898b1568e855cfd849
9,238
py
Python
apps/interactor/tests/commander/commands/test_animations.py
Djelibeybi/photons
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
[ "MIT" ]
51
2020-07-03T08:34:48.000Z
2022-03-16T10:56:08.000Z
apps/interactor/tests/commander/commands/test_animations.py
delfick/photons
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
[ "MIT" ]
81
2020-07-03T08:13:59.000Z
2022-03-31T23:02:54.000Z
apps/interactor/tests/commander/commands/test_animations.py
Djelibeybi/photons
bc0aa91771d8e88fd3c691fb58f18cb876f292ec
[ "MIT" ]
8
2020-07-24T23:48:20.000Z
2021-05-24T17:20:16.000Z
# coding: spec from interactor.commander.store import store, load_commands from photons_app.mimic.event import Events from photons_app import helpers as hp from photons_canvas.points.simple_messages import Set64 from unittest import mock import pytest describe "Animation Commands": async it "can get info and help", server, m: await server.assertCommand( "/v1/lifx/command", {"command": "animation/info"}, json_output={"animations": {}, "paused": []}, ) got = await server.assertCommand( "/v1/lifx/command", {"command": "animation/help"}, ) assert b"Available animations include" in got assert b"* dice" in got assert b"To see options for a particular animation, run this again" in got got = await server.assertCommand( "/v1/lifx/command", {"command": "animation/help", "args": {"animation_name": "dice"}}, ) assert b"dice animation" in got assert b"This animation has the following options:" in got assert b"colour range options" in got async it "can control an animation", server, m: await server.assertCommand( "/v1/lifx/command", {"command": "animation/info"}, json_output={"animations": {}, "paused": []}, ) identity = "first" got = await server.assertCommand( "/v1/lifx/command", {"command": "animation/start", "args": {"identity": identity}}, ) assert "animations" in got assert got["animations"] == [identity] assert got["started"] == identity identity2 = "second" got = await server.assertCommand( "/v1/lifx/command", {"command": "animation/start", "args": {"identity": identity2}}, ) assert "animations" in got identities = [identity, identity2] assert got["animations"] == identities assert got["started"] == identity2 info = await server.assertCommand( "/v1/lifx/command", {"command": "animation/info"}, ) assert info == {"animations": {identity: mock.ANY, identity2: mock.ANY}, "paused": []} # pause await server.assertCommand( "/v1/lifx/command", {"command": "animation/pause", "args": {"pause": identity}}, json_output={"animations": identities, "paused": [identity], "pausing": [identity]}, ) await server.assertCommand( "/v1/lifx/command", {"command": "animation/pause", "args": {"pause": identity2}}, json_output={ "animations": identities, "paused": identities, "pausing": [identity2], }, ) # resume await server.assertCommand( "/v1/lifx/command", {"command": "animation/resume", "args": {"resume": identity2}}, json_output={ "animations": identities, "paused": [identity], "resuming": [identity2], }, ) # pause multiple await server.assertCommand( "/v1/lifx/command", {"command": "animation/pause", "args": {"pause": identities}}, json_output={"animations": identities, "paused": identities, "pausing": identities}, ) # resume await server.assertCommand( "/v1/lifx/command", {"command": "animation/resume", "args": {"resume": identities}}, json_output={ "animations": identities, "paused": [], "resuming": identities, }, ) # pause await server.assertCommand( "/v1/lifx/command", {"command": "animation/pause", "args": {"pause": identity}}, json_output={"animations": identities, "paused": [identity], "pausing": [identity]}, ) # info info = await server.assertCommand( "/v1/lifx/command", {"command": "animation/info"}, ) assert info["animations"] == {identity: mock.ANY, identity2: mock.ANY} assert info["paused"] == [identity] # stop await server.assertCommand( "/v1/lifx/command", {"command": "animation/stop", "args": {"stop": identity}}, json_output={ "animations": [identity, identity2], "paused": [identity], "stopping": [identity], }, ) await m.add(0.5) # info info = await server.assertCommand( "/v1/lifx/command", 
{"command": "animation/info"}, ) assert info["animations"] == {identity2: mock.ANY} assert info["paused"] == [] async it "pausing an animation actually pauses the animation", devices, server, m: tile = devices["tile"] io = tile.io["MEMORY"] store = devices.store(tile) store.clear() first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64) # start got = await server.assertCommand( "/v1/lifx/command", {"command": "animation/start", "args": {"animations": [["balls", {"every": 3}]]}}, ) identity = got["started"] await first_set_64 now = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now > 0 await m.add(5) now2 = store.count(Events.INCOMING(tile, io, pkt=Set64)) assert now2 > now identity = got["started"] await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now # pause await server.assertCommand( "/v1/lifx/command", {"command": "animation/pause", "args": {"pause": [identity]}}, ) await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # resume await server.assertCommand( "/v1/lifx/command", {"command": "animation/resume", "args": {"resume": [identity]}}, ) await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0 # stop await server.assertCommand( "/v1/lifx/command", {"command": "animation/stop", "args": {"stop": [identity]}}, ) store.clear() await m.add(5) store.clear() await m.add(5) assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0 # info await server.assertCommand( "/v1/lifx/command", {"command": "animation/info"}, json_output={"animations": {}, "paused": []}, ) async it "can get information", server, m: # start got = await server.assertCommand( "/v1/lifx/command", {"command": "animation/start", "args": {"animations": [["balls", {"every": 0.3}]]}}, ) identity = got["started"] info = await server.assertCommand("/v1/lifx/command", {"command": "animation/info"}) assert info["paused"] == [] assert identity in info["animations"] assert info["animations"][identity]["animations_ran"] == 1 assert info["animations"][identity]["current_animation"] == { "name": "balls", "options": { "ball_colors": "<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>", "fade_amount": 0.02, "num_balls": 5, "rate": "<Rate 0.9 -> 1>", }, "started": mock.ANY, } assert info["animations"][identity]["options"]["combined"] assert "unlocked" in info["animations"][identity]["options"]["pauser"] assert info["animations"][identity]["options"]["noisy_network"] == 0 specific = await server.assertCommand( "/v1/lifx/command", {"command": "animation/info", "args": {"identity": identity}} ) info["animations"][identity]["current_animation"]["started"] = mock.ANY assert info["animations"][identity] == specific
31.209459
112
0.544923
76f93238491c8f0f67d7813df6d0b4a6c7ed0a80
245
py
Python
.ipython/profile_pytube/startup/init.py
showa-yojyo/dotfiles
994cc7df0643d69f62cb59550bdd48a42751c345
[ "MIT" ]
null
null
null
.ipython/profile_pytube/startup/init.py
showa-yojyo/dotfiles
994cc7df0643d69f62cb59550bdd48a42751c345
[ "MIT" ]
3
2018-03-27T14:10:18.000Z
2018-03-30T14:06:11.000Z
.ipython/profile_pytube/startup/init.py
showa-yojyo/dotfiles
994cc7df0643d69f62cb59550bdd48a42751c345
[ "MIT" ]
null
null
null
from pytube import YouTube
22.272727
55
0.608163
76fb80b4170accbe860db8c0999717d64544977e
5,741
py
Python
ament_tools/setup_arguments.py
richmattes/ament_tools
2a25cdcc273fcd73e81e8a47fe892a0b5963307d
[ "Apache-2.0" ]
1
2020-05-19T14:33:49.000Z
2020-05-19T14:33:49.000Z
ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/setup_arguments.py
mintforpeople/robobo-ros2-ios-port
1a5650304bd41060925ebba41d6c861d5062bfae
[ "Apache-2.0" ]
null
null
null
ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/setup_arguments.py
mintforpeople/robobo-ros2-ios-port
1a5650304bd41060925ebba41d6c861d5062bfae
[ "Apache-2.0" ]
null
null
null
# Copyright 2015 Open Source Robotics Foundation, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import distutils.core import os try: import setuptools except ImportError: pass import subprocess import sys from threading import Lock from ament_tools.build_type import get_command_prefix from ament_tools.helper import quote_shell_command setup_lock = None def get_setup_arguments_with_context(build_type, context): """ Capture the arguments of the setup() function in the setup.py file. To provide a custom environment when introspecting the setup() function a separate Python interpreter is being used which can have an extended PYTHONPATH etc. :param build_type: the build type :param context: the context :type context: :py:class:`ament_tools.context.Context` :returns: a dictionary containing the arguments of the setup() function """ prefix = get_command_prefix( '%s__setup' % build_type, context.build_space, context.build_dependencies) ament_tools_path = os.path.dirname(os.path.dirname(__file__)) setuppy = os.path.join(context.source_space, 'setup.py') if os.name == 'nt': ament_tools_path = ament_tools_path.replace(os.sep, os.altsep) setuppy = setuppy.replace(os.sep, os.altsep) code_lines = [ 'import sys', "sys.path.insert(0, '%s')" % ament_tools_path, 'from ament_tools.setup_arguments import get_setup_arguments', "print(repr(get_setup_arguments('%s')))" % setuppy] # invoke get_setup_arguments() in a separate interpreter cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)] cmd = quote_shell_command(cmd) result = subprocess.run( cmd, stdout=subprocess.PIPE, shell=True, check=True) output = result.stdout.decode() return ast.literal_eval(output) def get_setup_arguments(setup_py_path): """ Capture the arguments of the setup() function in the setup.py file. The function is being run within the current Python interpreter. Therefore the processed setup.py file can not have any additional dependencies not available in the current environment. :param setup_py_path: the path to the setup.py file :returns: a dictionary containing the arguments of the setup() function """ global setup_lock if not setup_lock: setup_lock = Lock() assert os.path.basename(setup_py_path) == 'setup.py' # prevent side effects in other threads with setup_lock: # change to the directory containing the setup.py file old_cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(setup_py_path))) try: data = {} mock_setup = create_mock_setup_function(data) # replace setup() function of distutils and setuptools # in order to capture its arguments try: distutils_setup = distutils.core.setup distutils.core.setup = mock_setup try: setuptools_setup = setuptools.setup setuptools.setup = mock_setup except NameError: pass # evaluate the setup.py file with open('setup.py', 'r') as h: exec(h.read()) finally: distutils.core.setup = distutils_setup try: setuptools.setup = setuptools_setup except NameError: pass return data finally: os.chdir(old_cwd) def create_mock_setup_function(data): """ Create a mock function to capture its arguments. 
It can replace either distutils.core.setup or setuptools.setup. :param data: a dictionary which is updated with the captured arguments :returns: a function to replace disutils.core.setup and setuptools.setup """ return setup def get_data_files_mapping(data_files): """ Transform the data_files structure into a dictionary. :param data_files: either a list of source files or a list of tuples where the first element is the destination path and the second element is a list of source files :returns: a dictionary mapping the source file to a destination file """ mapping = {} for data_file in data_files: if isinstance(data_file, tuple): assert len(data_file) == 2 dest = data_file[0] assert not os.path.isabs(dest) sources = data_file[1] assert isinstance(sources, list) for source in sources: assert not os.path.isabs(source) mapping[source] = os.path.join(dest, os.path.basename(source)) else: assert not os.path.isabs(data_file) mapping[data_file] = os.path.basename(data_file) return mapping
35.006098
79
0.656854
76fc510648fa61f30ccc12c1c9b02c19d255e9c6
870
py
Python
tests/functional/test_soft_round_inverse.py
tallamjr/NeuralCompression
21d05ec0d9f8c52d8742fde36f569b4dad2842a5
[ "MIT" ]
233
2021-07-19T18:50:21.000Z
2022-03-30T22:06:40.000Z
tests/functional/test_soft_round_inverse.py
tallamjr/NeuralCompression
21d05ec0d9f8c52d8742fde36f569b4dad2842a5
[ "MIT" ]
79
2021-07-22T13:33:45.000Z
2022-02-09T16:38:42.000Z
tests/functional/test_soft_round_inverse.py
tallamjr/NeuralCompression
21d05ec0d9f8c52d8742fde36f569b4dad2842a5
[ "MIT" ]
21
2021-07-29T18:27:59.000Z
2022-02-28T02:32:53.000Z
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from neuralcompression.functional import soft_round, soft_round_inverse
24.857143
71
0.61954
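As context for the imports above, soft_round is a differentiable relaxation of rounding and soft_round_inverse undoes it. The sketch below shows the intended round trip; the alpha keyword and exact call signature are assumptions about the neuralcompression API rather than something stated in this record.

import torch
from neuralcompression.functional import soft_round, soft_round_inverse

x = torch.linspace(-1.5, 1.5, steps=7)
y = soft_round(x, alpha=5.0)              # smooth approximation of torch.round(x)
x_back = soft_round_inverse(y, alpha=5.0)  # inverse mapping should recover x
print(torch.allclose(x, x_back, atol=1e-4))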
76fce814c3b3e855b82681563736510cd9476acb
1,738
py
Python
dizoo/pybullet/config/hopper_ppo_default_config.py
konnase/DI-engine
f803499cad191e9277b10e194132d74757bcfc8e
[ "Apache-2.0" ]
2
2021-07-30T15:55:45.000Z
2021-07-30T16:35:10.000Z
dizoo/pybullet/config/hopper_ppo_default_config.py
konnase/DI-engine
f803499cad191e9277b10e194132d74757bcfc8e
[ "Apache-2.0" ]
null
null
null
dizoo/pybullet/config/hopper_ppo_default_config.py
konnase/DI-engine
f803499cad191e9277b10e194132d74757bcfc8e
[ "Apache-2.0" ]
null
null
null
from easydict import EasyDict

hopper_ppo_default_config = dict(
    env=dict(
        env_id='HopperMuJoCoEnv-v0',
        norm_obs=dict(use_norm=False, ),
        norm_reward=dict(use_norm=False, ),
        collector_env_num=8,
        evaluator_env_num=10,
        use_act_scale=True,
        n_evaluator_episode=10,
        stop_value=3000,
    ),
    policy=dict(
        cuda=True,
        on_policy=True,
        recompute_adv=True,
        model=dict(
            obs_shape=11,
            action_shape=3,
            continuous=True,
        ),
        continuous=True,
        learn=dict(
            epoch_per_collect=10,
            batch_size=64,
            learning_rate=3e-4,
            value_weight=0.5,
            entropy_weight=0.0,
            clip_ratio=0.2,
            adv_norm=True,
            value_norm=True,
        ),
        collect=dict(
            n_sample=2048,
            unroll_len=1,
            discount_factor=0.99,
            gae_lambda=0.97,
        ),
        eval=dict(evaluator=dict(eval_freq=5000, )),
        other=dict(replay_buffer=dict(
            replay_buffer_size=10000,
            replay_buffer_start_size=0,
        ), ),
    ),
)
hopper_ppo_default_config = EasyDict(hopper_ppo_default_config)
main_config = hopper_ppo_default_config

hopper_ppo_create_default_config = dict(
    env=dict(
        type='pybullet',
        import_names=['dizoo.pybullet.envs.pybullet_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(
        type='ppo',
        import_names=['ding.policy.ppo'],
    ),
    replay_buffer=dict(type='naive', ),
)
hopper_ppo_create_default_config = EasyDict(hopper_ppo_create_default_config)
create_config = hopper_ppo_create_default_config
27.15625
77
0.596663
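Config pairs like main_config/create_config above are normally handed to DI-engine's serial pipeline to launch training. The entry point below reflects common DI-engine usage and is an assumption about how this particular config is consumed, not part of the record itself.

# Hypothetical launch script for the config above (DI-engine style).
from ding.entry import serial_pipeline
from dizoo.pybullet.config.hopper_ppo_default_config import main_config, create_config

if __name__ == '__main__':
    serial_pipeline((main_config, create_config), seed=0)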
76fcfe8188f93389658caff72e97003d25b756ad
1,519
py
Python
cisco_sdwan_policy/List/Application.py
ljm625/cisco_sdwan_policy_python
1dd1361a7c4e8ee36df6176f54583081b4ad800a
[ "MIT" ]
11
2019-11-07T02:22:34.000Z
2022-03-04T17:47:02.000Z
cisco_sdwan_policy/List/Application.py
ljm625/cisco_sdwan_policy_python
1dd1361a7c4e8ee36df6176f54583081b4ad800a
[ "MIT" ]
null
null
null
cisco_sdwan_policy/List/Application.py
ljm625/cisco_sdwan_policy_python
1dd1361a7c4e8ee36df6176f54583081b4ad800a
[ "MIT" ]
6
2019-11-07T02:22:41.000Z
2020-07-30T01:58:51.000Z
import json

from cisco_sdwan_policy.BaseObject import BaseObject
29.784314
85
0.579987
76fe32cf212234521487302570fb1379460db739
1,575
py
Python
supervisor/docker/dns.py
zeehio/supervisor
b2f2806465001b4f6500601fa4c6516a404d53b8
[ "Apache-2.0" ]
null
null
null
supervisor/docker/dns.py
zeehio/supervisor
b2f2806465001b4f6500601fa4c6516a404d53b8
[ "Apache-2.0" ]
null
null
null
supervisor/docker/dns.py
zeehio/supervisor
b2f2806465001b4f6500601fa4c6516a404d53b8
[ "Apache-2.0" ]
null
null
null
"""DNS docker object.""" import logging from ..const import ENV_TIME from ..coresys import CoreSysAttributes from .interface import DockerInterface _LOGGER: logging.Logger = logging.getLogger(__name__) DNS_DOCKER_NAME: str = "hassio_dns"
26.25
87
0.576508
76fe3680ef2ec070b0bf345c1f776ebc38adabdb
2,927
py
Python
nuitka/codegen/OperatorCodes.py
hclivess/Nuitka
9c7ec9696e69a3901b25d5bce720c921d45c931b
[ "Apache-2.0" ]
null
null
null
nuitka/codegen/OperatorCodes.py
hclivess/Nuitka
9c7ec9696e69a3901b25d5bce720c921d45c931b
[ "Apache-2.0" ]
1
2019-03-01T11:33:40.000Z
2019-03-01T11:33:40.000Z
nuitka/codegen/OperatorCodes.py
hclivess/Nuitka
9c7ec9696e69a3901b25d5bce720c921d45c931b
[ "Apache-2.0" ]
1
2019-03-26T16:56:21.000Z
2019-03-26T16:56:21.000Z
# Copyright 2019, Kay Hayen, mailto:[email protected] # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Operator code tables These are mostly used to look up the Python C/API from operations or a wrapper used. """ from nuitka.PythonVersions import python_version binary_operator_codes = { # Those commented out in this section have fully specialized variants already. # "Add" : "PyNumber_Add", # "Sub" : "PyNumber_Subtract", # "Div" : "PyNumber_Divide", # "Mult" : "PyNumber_Multiply", # "Mod" : "PyNumber_Remainder", # "Div" : "PyNumber_Divide", # "FloorDiv" : "PyNumber_FloorDivide", # "TrueDiv" : "PyNumber_TrueDivide", # These have their own variants only to make sure the generic code is in-lined # but the CPython code is not in-lined. # "Pow" : "PyNumber_Power", # "IPow" : "PyNumber_InPlacePower", # The others are generic code and would be faster if they had a specialized variant too. "LShift": "PyNumber_Lshift", "RShift": "PyNumber_Rshift", "BitAnd": "PyNumber_And", "BitOr": "PyNumber_Or", "BitXor": "PyNumber_Xor", "IAdd": "PyNumber_InPlaceAdd", "ISub": "PyNumber_InPlaceSubtract", "IMult": "PyNumber_InPlaceMultiply", "IDiv": "PyNumber_InPlaceDivide", "IFloorDiv": "PyNumber_InPlaceFloorDivide", "ITrueDiv": "PyNumber_InPlaceTrueDivide", "IMod": "PyNumber_InPlaceRemainder", "ILShift": "PyNumber_InPlaceLshift", "IRShift": "PyNumber_InPlaceRshift", "IBitAnd": "PyNumber_InPlaceAnd", "IBitOr": "PyNumber_InPlaceOr", "IBitXor": "PyNumber_InPlaceXor", } # Python 3.5 only operator if python_version >= 350: binary_operator_codes["MatMult"] = "PyNumber_MatrixMultiply" binary_operator_codes["IMatMult"] = "PyNumber_InPlaceMatrixMultiply" unary_operator_codes = { "UAdd": ("PyNumber_Positive", 1), "USub": ("PyNumber_Negative", 1), "Invert": ("PyNumber_Invert", 1), "Repr": ("PyObject_Repr", 1), "Not": ("UNARY_NOT", 0), } rich_comparison_codes = { "Lt": "LT", "LtE": "LE", "Eq": "EQ", "NotEq": "NE", "Gt": "GT", "GtE": "GE", } containing_comparison_codes = ("In", "NotIn")
35.26506
92
0.65767
76ffe18f535cd4c67dd1eed479466fb1bd48b6ea
6,130
py
Python
python_modules/dagster-graphql/dagster_graphql/implementation/external.py
rpatil524/dagster
6f918d94cbd543ab752ab484a65e3a40fd441716
[ "Apache-2.0" ]
1
2021-01-31T19:16:29.000Z
2021-01-31T19:16:29.000Z
python_modules/dagster-graphql/dagster_graphql/implementation/external.py
rpatil524/dagster
6f918d94cbd543ab752ab484a65e3a40fd441716
[ "Apache-2.0" ]
null
null
null
python_modules/dagster-graphql/dagster_graphql/implementation/external.py
rpatil524/dagster
6f918d94cbd543ab752ab484a65e3a40fd441716
[ "Apache-2.0" ]
null
null
null
import sys

from dagster import check
from dagster.config.validate import validate_config_from_snap
from dagster.core.host_representation import ExternalPipeline, PipelineSelector, RepositorySelector
from dagster.core.workspace.context import BaseWorkspaceRequestContext
from dagster.utils.error import serializable_error_info_from_exc_info
from graphql.execution.base import ResolveInfo

from .utils import UserFacingGraphQLError, capture_error
36.272189
99
0.746656
76fffdbfafd70ccdff333934ec210a4753dad75a
1,552
py
Python
tests/test_utils/test_pywriting_utils.py
heylohousing/quickbase-client
46e4eea3a5c7a2720560e5688eb4f0fbdb607206
[ "MIT" ]
null
null
null
tests/test_utils/test_pywriting_utils.py
heylohousing/quickbase-client
46e4eea3a5c7a2720560e5688eb4f0fbdb607206
[ "MIT" ]
null
null
null
tests/test_utils/test_pywriting_utils.py
heylohousing/quickbase-client
46e4eea3a5c7a2720560e5688eb4f0fbdb607206
[ "MIT" ]
null
null
null
import os
from tempfile import TemporaryDirectory

from quickbase_client.utils.pywriting_utils import BasicPyFileWriter
from quickbase_client.utils.pywriting_utils import PyPackageWriter
31.673469
79
0.595361
0a00e63d1006dbef16f6c53de45b2f52bfe52dea
7,268
py
Python
model/resnet.py
DrMMZ/RetinaNet
0b8491076f2ad344e101f724a2f5b8305adb2d52
[ "MIT" ]
7
2021-07-07T02:59:58.000Z
2021-12-09T04:48:49.000Z
model/resnet.py
DrMMZ/ResFPN
3acd6c629419a9f66da5386f3fd3deb9e8c929ff
[ "MIT" ]
3
2021-11-25T07:21:03.000Z
2022-01-17T18:56:29.000Z
model/resnet.py
DrMMZ/RetinaNet
0b8491076f2ad344e101f724a2f5b8305adb2d52
[ "MIT" ]
2
2021-12-09T01:48:36.000Z
2022-01-08T15:54:58.000Z
""" Residual Networks (ResNet) """ # adapted from # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py import tensorflow as tf def identity_block( input_tensor, filters, stage, block, train_bn=False ): """ Builds an identity shortcut in a bottleneck building block of a ResNet. Parameters ---------- input_tensor : tf tensor, [batch_size, height, width, channels] An input tensor. filters : list, positive integers The number of filters in 3 conv layers at the main path, where last number is equal to input_tensor's channels. stage : integer A number in [2,5] used for generating layer names. block : string A lowercase letter, used for generating layer names. train_bn : boolean, optional Whether one should normalize the layer input by the mean and variance over the current batch. The default is False, i.e., use the moving average of mean and variance to normalize the layer input. Returns ------- output_tensor : tf tensor, [batch_size, height, width, channels] The output tensor same shape as input_tensor. """ num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix = 'res' + str(stage) + block + '_branch' bn_prefix = 'bn' + str(stage) + block + '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2a')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) x = tf.keras.layers.Add()([input_tensor, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res' + str(stage) + block + '_out')(x) return output_tensor def conv_block( input_tensor, filters, stage, block, strides=(2, 2), train_bn=False ): """ Builds a projection shortcut in a bottleneck block of a ResNet. Parameters ---------- input_tensor : tf tensor, [batch_size, height, width, channels] An input tensor. filters : list, positive integers The number of filters in 3 conv layers at the main path. stage : integer A number in [2,5] used for generating layer names. block : string A lowercase letter, used for generating layer names. strides : tuple, integers, optional The conv layer strides. The default is (2, 2). train_bn : boolean, optional Whether one should normalize the layer input by the mean and variance over the current batch. The default is False, i.e., use the moving average of mean and variance to normalize the layer input. Returns ------- output_tensor : tf tensor [batch_size, height//strides, width//strides, num_filters_3] where num_filters_3 is the last number in filters, the output tensor. 
""" num_filters_1, num_filters_2, num_filters_3 = filters conv_prefix = 'res' + str(stage) + block + '_branch' bn_prefix = 'bn' + str(stage) + block + '_branch' x = tf.keras.layers.Conv2D( num_filters_1, (1,1), strides, name=conv_prefix + '2a')(input_tensor) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2a')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2b')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.Conv2D( num_filters_3, (1,1), name=conv_prefix + '2c')(x) x = tf.keras.layers.BatchNormalization( name=bn_prefix + '2c')(x, training=train_bn) shortcut = tf.keras.layers.Conv2D( num_filters_3, (1,1), strides, name=conv_prefix + '1')(input_tensor) shortcut = tf.keras.layers.BatchNormalization( name=bn_prefix + '1')(shortcut, training=train_bn) x = tf.keras.layers.Add()([shortcut, x]) output_tensor = tf.keras.layers.Activation( 'relu', name='res' + str(stage) + block + '_out')(x) return output_tensor def backbone_resnet(input_image, architecture, stage5=True, train_bn=False): """ Builds a backbone ResNet. Parameters ---------- input_image : tf tensor, [batch_size, height, width, channels] An input tensor. architecture : string The ResNet architecture in {'resnet50', 'resnet101'}. stage5 : boolean, optional Whether create stage5 of network. The default is True. train_bn : boolean, optional Whether one should normalize the layer input by the mean and variance over the current batch. The default is False, i.e., use the moving average of mean and variance to normalize the layer input. Returns ------- outputs : list Feature maps at each stage. """ assert architecture in ['resnet50', 'resnet101'], \ 'Only support ResNet50\101' # stage 1 x = tf.keras.layers.ZeroPadding2D((3,3))(input_image) x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x) x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn) x = tf.keras.layers.Activation('relu')(x) C1 = x = tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x) # stage 2 x = conv_block( x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn) x = identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn) C2 = x = identity_block( x, [64,64,256], stage=2, block='c', train_bn=train_bn) # stage 3 x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn) x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn) C3 = x = identity_block( x, [128,128,512], stage=3, block='d', train_bn=train_bn) # stage 4 x = conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn) num_blocks = {'resnet50':5, 'resnet101':22}[architecture] for i in range(num_blocks): x = identity_block( x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn) C4 = x # stage 5 if stage5: x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn) x = identity_block( x, [512,512,2048], stage=5, block='b', train_bn=train_bn) C5 = x = identity_block( x, [512,512,2048], stage=5, block='c', train_bn=train_bn) else: C5 = None return [C1, C2, C3, C4, C5]
35.802956
81
0.624381
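backbone_resnet in the record above returns the per-stage feature maps C1 through C5. A minimal construction sketch using only the signature shown there (the input resolution and architecture choice are arbitrary, and wiring the outputs into a Keras Model is an illustrative assumption):

# Build a ResNet-50 backbone and inspect the stage outputs (C1..C5).
import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(224, 224, 3))
C1, C2, C3, C4, C5 = backbone_resnet(inputs, architecture='resnet50', stage5=True, train_bn=False)
model = tf.keras.Model(inputs, [C2, C3, C4, C5], name='resnet50_backbone')
model.summary()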
0a00e9db3835c97e792fd5d157869c740791d2ab
1,060
py
Python
src/main/python/rds_log_cat/parser/mysql57.py
Scout24/rds-log-cat
00147dc2e3ec6fc894fccd5a9cbf7faa71cf7e78
[ "MIT" ]
1
2019-11-07T10:44:28.000Z
2019-11-07T10:44:28.000Z
src/main/python/rds_log_cat/parser/mysql57.py
Scout24/rds-log-cat
00147dc2e3ec6fc894fccd5a9cbf7faa71cf7e78
[ "MIT" ]
2
2017-04-25T13:36:44.000Z
2018-03-12T20:34:21.000Z
src/main/python/rds_log_cat/parser/mysql57.py
ImmobilienScout24/rds-log-cat
00147dc2e3ec6fc894fccd5a9cbf7faa71cf7e78
[ "MIT" ]
1
2021-01-27T19:08:09.000Z
2021-01-27T19:08:09.000Z
from rds_log_cat.parser.parser import Parser, LineParserException
31.176471
102
0.592453
0a01272b6dc30ae670eab0e73c74a21ff812e409
16,090
py
Python
corpustools/neighdens/neighborhood_density.py
PhonologicalCorpusTools/CorpusTools
ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e
[ "BSD-3-Clause" ]
97
2015-07-06T18:58:43.000Z
2022-03-10T23:00:07.000Z
corpustools/neighdens/neighborhood_density.py
PhonologicalCorpusTools/CorpusTools
ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e
[ "BSD-3-Clause" ]
443
2015-03-10T21:24:39.000Z
2022-03-22T22:20:13.000Z
corpustools/neighdens/neighborhood_density.py
PhonologicalCorpusTools/CorpusTools
ff5a7c06e2f7a478c5a239de7a78ef7eb5f4a45e
[ "BSD-3-Clause" ]
22
2015-07-19T18:56:24.000Z
2020-09-17T17:58:12.000Z
from functools import partial from corpustools.corpus.classes import Word from corpustools.symbolsim.edit_distance import edit_distance from corpustools.symbolsim.khorsi import khorsi from corpustools.symbolsim.phono_edit_distance import phono_edit_distance from corpustools.symbolsim.phono_align import Aligner from corpustools.multiproc import filter_mp, score_mp def neighborhood_density_all_words(corpus_context, tierdict, tier_type = None, sequence_type = None, algorithm = 'edit_distance', max_distance = 1, output_format = 'spelling', num_cores = -1, settable_attr = None, collapse_homophones = False, stop_check = None, call_back = None): """Calculate the neighborhood density of all words in the corpus and adds them as attributes of the words. Parameters ---------- corpus_context : CorpusContext Context manager for a corpus algorithm : str The algorithm used to determine distance max_distance : float, optional Maximum edit distance from the queried word to consider a word a neighbor. stop_check : callable, optional Optional function to check whether to gracefully terminate early call_back : callable, optional Optional function to supply progress information during the function settable_attr: string Name of attribute that neighbourhood density results will be assigned to """ function = partial(neighborhood_density, corpus_context, tierdict = tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm = algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) if call_back is not None: call_back('Calculating neighborhood densities...') call_back(0,len(corpus_context)) cur = 0 results = dict() last_value_removed = None last_key_removed = None if num_cores == -1 or num_cores == 1: for w in corpus_context: if stop_check is not None and stop_check(): return if last_value_removed: tierdict[last_key_removed].append(last_value_removed) w_sequence = getattr(w, corpus_context.sequence_type) last_key_removed = str(w_sequence) for i, item in enumerate(tierdict[last_key_removed]): if str(item) == str(w): last_value_removed = tierdict[last_key_removed].pop(i) break res = neighborhood_density(corpus_context, w, tierdict, tier_type = tier_type, sequence_type = sequence_type, algorithm = algorithm, max_distance = max_distance, collapse_homophones = collapse_homophones) results[str(w)] = [getattr(r, output_format) for r in res[1]] setattr(w.original, settable_attr.name, res[0]) # for w in corpus_context: # if stop_check is not None and stop_check(): # return # cur += 1 # call_back(cur) # res = function(w) # results[str(w)] = [getattr(r, output_format) for r in res[1]] # setattr(w.original, settable_attr.name, res[0]-1) # #the -1 is to account for the fact that words are counted as their own neighbour, and this is incorrect # #subtracting 1 here is easier than fixing the neighbourhood density algorithm else: iterable = ((w,) for w in corpus_context) neighbors = score_mp(iterable, function, num_cores, call_back, stop_check, chunk_size = 1) for n in neighbors: #Have to look up the key, then look up the object due to how #multiprocessing pickles objects setattr(corpus_context.corpus.find(corpus_context.corpus.key(n[0])), #corpus_context.attribute.name, n[1][0]) settable_attr.name, n[1][0]) return results def neighborhood_density(corpus_context, query, tierdict, algorithm = 'edit_distance', max_distance = 1, collapse_homophones = False, force_quadratic = False, file_type = None, tier_type=None, sequence_type = None, stop_check = None, call_back = None): """Calculate the 
neighborhood density of a particular word in the corpus. Parameters ---------- corpus_context : CorpusContext Context manager for a corpus query : Word The word whose neighborhood density to calculate. algorithm : str The algorithm used to determine distance max_distance : float, optional Maximum edit distance from the queried word to consider a word a neighbor force_quadratic : bool Force use of the less efficient quadratic algorithm even when finding edit distance of 1 neighborhoods stop_check : callable, optional Optional function to check whether to gracefully terminate early call_back : callable, optional Optional function to supply progress information during the function Returns ------- tuple(int, set) Tuple of the number of neighbors and the set of neighbor Words. """ matches = [] query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding neighbors for {}...'.format(query)) call_back(0,len(corpus_context)) cur = 0 if algorithm == 'edit_distance' and max_distance == 1 and not force_quadratic: return fast_neighborhood_density(corpus_context, query, corpus_context.sequence_type, tier_type, tierdict, file_type=file_type, collapse_homophones=collapse_homophones) if algorithm == 'edit_distance': is_neighbor = partial(_is_edit_distance_neighbor, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'phono_edit_distance': is_neighbor = partial(_is_phono_edit_distance_neighbor, specifier = corpus_context.specifier, sequence_type = corpus_context.sequence_type, max_distance = max_distance) elif algorithm == 'khorsi': freq_base = corpus_context.get_frequency_base() is_neighbor = partial(_is_khorsi_neighbor, freq_base = freq_base, sequence_type = corpus_context.sequence_type, max_distance = max_distance) for w in corpus_context: if stop_check is not None and stop_check(): return if call_back is not None: cur += 1 if cur % 10 == 0: call_back(cur) if not is_neighbor(w, query): continue matches.append(w) neighbors = set(matches)-set([query]) return (len(neighbors), neighbors) def fast_neighborhood_density(corpus_context, query, sequence_type, tier_type, tierdict, file_type=None, trans_delimiter='.', collapse_homophones = False): """Generates all neighbors of edit distance <= 1 and searches for them in corpus_context. 
Will be faster than neighborhood_density when: n > m * (1 + s), where n: number of words in corpus m: length of query s: size of segment inventory """ neighbors = list() query = ensure_query_is_word(query, corpus_context, sequence_type, tier_type, file_type=file_type) for candidate in generate_neighbor_candidates(corpus_context, query, sequence_type): if tier_type.att_type == 'tier': cand_str = trans_delimiter.join(candidate) else: cand_str = ''.join(candidate) if cand_str in tierdict: for w in tierdict[cand_str]: w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(word, sequence_type) == w_sequence for word in neighbors): continue else: neighbors.append(w) return (len(neighbors), neighbors) def find_mutation_minpairs(corpus_context, query, tier_type = None, collapse_homophones = False, stop_check = None, call_back = None): """Find all minimal pairs of the query word based only on segment mutations (not deletions/insertions) Parameters ---------- corpus_context : CorpusContext Context manager for a corpus query : Word The word whose minimal pairs to find stop_check : callable or None Optional function to check whether to gracefully terminate early call_back : callable or None Optional function to supply progress information during the function Returns ------- list The found minimal pairs for the queried word """ matches = [] sequence_type = corpus_context.sequence_type query = ensure_query_is_word(query, corpus_context, corpus_context.sequence_type, tier_type) if call_back is not None: call_back('Finding neighbors...') call_back(0,len(corpus_context)) cur = 0 al = Aligner(features_tf=False, ins_penalty=float('inf'), del_penalty=float('inf'), sub_penalty=1) for w in corpus_context: w_sequence = getattr(w, sequence_type) query_sequence = getattr(query, sequence_type) if stop_check is not None and stop_check(): return if call_back is not None: cur += 1 if cur % 10 == 0: call_back(cur) if (len(w_sequence) > len(query_sequence)+1 or len(w_sequence) < len(query_sequence)-1): continue m = al.make_similarity_matrix(query_sequence, w_sequence) if m[-1][-1]['f'] != 1: continue w_sequence = getattr(w, sequence_type) if collapse_homophones and any(getattr(m, sequence_type) == w_sequence for m in matches): continue else: #matches.append(str(w_sequence)) matches.append(w) matches = [m.spelling for m in matches] neighbors = list(set(matches)-set([str(query_sequence)])) return (len(neighbors), neighbors)
44.203297
131
0.627968
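fast_neighborhood_density in the record above relies on generate_neighbor_candidates, which is not included in this excerpt. The sketch below shows the general idea its docstring describes (enumerate every transcription within edit distance 1 over the segment inventory, then look the candidates up in tierdict); the function name, argument types, and candidate format are illustrative assumptions, not the corpustools implementation:

def generate_edit1_candidates(segments, inventory):
    # Yield every sequence reachable from `segments` (a list of segment strings)
    # by one deletion, one substitution, or one insertion drawn from `inventory`.
    segments = list(segments)
    for i in range(len(segments)):
        yield tuple(segments[:i] + segments[i + 1:])                   # deletion
        for seg in inventory:
            if seg != segments[i]:
                yield tuple(segments[:i] + [seg] + segments[i + 1:])   # substitution
    for i in range(len(segments) + 1):
        for seg in inventory:
            yield tuple(segments[:i] + [seg] + segments[i:])           # insertion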
0a017ba6441979fea8dcb4bd6912e6e472b2970d
456
py
Python
brokenChains/migrations/0003_auto_20181106_1819.py
bunya017/brokenChains
3e20c834efd7f0ade8e3abe7acf547c093f76758
[ "MIT" ]
1
2018-12-07T09:15:57.000Z
2018-12-07T09:15:57.000Z
brokenChains/migrations/0003_auto_20181106_1819.py
bunya017/brokenChains
3e20c834efd7f0ade8e3abe7acf547c093f76758
[ "MIT" ]
null
null
null
brokenChains/migrations/0003_auto_20181106_1819.py
bunya017/brokenChains
3e20c834efd7f0ade8e3abe7acf547c093f76758
[ "MIT" ]
null
null
null
# Generated by Django 2.1.1 on 2018-11-06 17:19 from django.conf import settings from django.db import migrations
22.8
66
0.64693
0a039f10e8309cc703a9629baacf52288c510305
5,046
py
Python
ex05-td/ex05-td.py
vijaykumarprabhu/rl-course
cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4
[ "MIT" ]
null
null
null
ex05-td/ex05-td.py
vijaykumarprabhu/rl-course
cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4
[ "MIT" ]
null
null
null
ex05-td/ex05-td.py
vijaykumarprabhu/rl-course
cc9db0236bd1908e0fa54eae1b2fcfd609ec0ae4
[ "MIT" ]
1
2020-05-26T20:11:21.000Z
2020-05-26T20:11:21.000Z
import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt


def print_policy(Q, env):
    """ This is a helper function to print a nice policy from the Q function"""
    moves = [u'←', u'↓',u'→', u'↑']
    if not hasattr(env, 'desc'):
        env = env.env
    dims = env.desc.shape
    policy = np.chararray(dims, unicode=True)
    policy[:] = ' '
    for s in range(len(Q)):
        idx = np.unravel_index(s, dims)
        policy[idx] = moves[np.argmax(Q[s])]
        if env.desc[idx] in ['H', 'G']:
            policy[idx] = u''
    print('\n'.join([''.join([u'{:2}'.format(item) for item in row]) for row in policy]))


def plot_V(Q, env):
    """ This is a helper function to plot the state values from the Q function"""
    fig = plt.figure()
    if not hasattr(env, 'desc'):
        env = env.env
    dims = env.desc.shape
    V = np.zeros(dims)
    for s in range(len(Q)):
        idx = np.unravel_index(s, dims)
        V[idx] = np.max(Q[s])
        if env.desc[idx] in ['H', 'G']:
            V[idx] = 0.
    plt.imshow(V, origin='upper',
               extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
               cmap=plt.cm.RdYlGn, interpolation='none')
    for x, y in product(range(dims[0]), range(dims[1])):
        plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
                 horizontalalignment='center',
                 verticalalignment='center')
    plt.xticks([])
    plt.yticks([])


def plot_Q(Q, env):
    """ This is a helper function to plot the Q function """
    from matplotlib import colors, patches
    fig = plt.figure()
    ax = fig.gca()
    if not hasattr(env, 'desc'):
        env = env.env
    dims = env.desc.shape
    up = np.array([[0, 1], [0.5, 0.5], [1,1]])
    down = np.array([[0, 0], [0.5, 0.5], [1,0]])
    left = np.array([[0, 0], [0.5, 0.5], [0,1]])
    right = np.array([[1, 0], [0.5, 0.5], [1,1]])
    tri = [left, down, right, up]
    pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
    cmap = plt.cm.RdYlGn
    norm = colors.Normalize(vmin=.0,vmax=.6)
    ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]],
              vmin=.0, vmax=.6, cmap=cmap)
    ax.grid(which='major', color='black', linestyle='-', linewidth=2)
    for s in range(len(Q)):
        idx = np.unravel_index(s, dims)
        x, y = idx
        if env.desc[idx] in ['H', 'G']:
            ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))
            plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),
                     horizontalalignment='center',
                     verticalalignment='center')
            continue
        for a in range(len(tri)):
            ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))
            plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),
                     horizontalalignment='center', verticalalignment='center',
                     fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
    plt.xticks([])
    plt.yticks([])


env=gym.make('FrozenLake-v0')
#env=gym.make('FrozenLake-v0', is_slippery=False)
#env=gym.make('FrozenLake-v0', map_name="8x8")
print("Running sarsa...")
Q = sarsa(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()

print("Running qlearning")
Q = qlearning(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
32.346154
104
0.561038
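The script in the record above calls sarsa(env) and qlearning(env), which are not part of this excerpt. Below is a minimal tabular SARSA with an epsilon-greedy policy as an illustrative stand-in; the hyperparameters are assumptions, and the classic gym reset/step API used elsewhere in the file is assumed:

import numpy as np

def sarsa_sketch(env, alpha=0.1, gamma=0.99, epsilon=0.1, num_episodes=5000):
    # Illustrative on-policy TD(0) control; not the solution shipped with the exercise.
    Q = np.zeros((env.observation_space.n, env.action_space.n))

    def epsilon_greedy(s):
        if np.random.rand() < epsilon:
            return env.action_space.sample()
        return int(np.argmax(Q[s]))

    for _ in range(num_episodes):
        s = env.reset()
        a = epsilon_greedy(s)
        done = False
        while not done:
            s_next, r, done, _ = env.step(a)
            a_next = epsilon_greedy(s_next)
            # Update towards the action actually taken in the next state (on-policy).
            target = r + gamma * Q[s_next, a_next] * (not done)
            Q[s, a] += alpha * (target - Q[s, a])
            s, a = s_next, a_next
    return Q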
0a03afbc022ab3ed1e3b4074455a3f3fdefc3a2e
1,189
py
Python
app/modules/ai_lab/migrations/0003_ailabcasestudy.py
nickmoreton/nhsx-website
2397d1308376c02b75323d30e6bc916af0daac9d
[ "MIT" ]
50
2019-04-04T17:50:00.000Z
2021-08-05T15:08:37.000Z
app/modules/ai_lab/migrations/0003_ailabcasestudy.py
nickmoreton/nhsx-website
2397d1308376c02b75323d30e6bc916af0daac9d
[ "MIT" ]
434
2019-04-04T18:25:32.000Z
2022-03-31T18:23:37.000Z
app/modules/ai_lab/migrations/0003_ailabcasestudy.py
nhsx-mirror/nhsx-website
2133b4e275ca35ff77f7d6874e809f139ec4bf86
[ "MIT" ]
23
2019-04-04T09:52:07.000Z
2021-04-11T07:41:47.000Z
# Generated by Django 3.0.4 on 2020-07-14 11:00 from django.db import migrations, models import django.db.models.deletion
29
68
0.444071
0a03cda07d112635217a5bbdc7ec5274c0658a7a
3,258
py
Python
requests/UpdateWorkbookConnectionRequest.py
divinorum-webb/python-tableau-api
9d3f130d63b15307ad2b23e2273b52790b8d9018
[ "Apache-2.0" ]
1
2019-06-08T22:19:40.000Z
2019-06-08T22:19:40.000Z
requests/UpdateWorkbookConnectionRequest.py
divinorum-webb/python-tableau-api
9d3f130d63b15307ad2b23e2273b52790b8d9018
[ "Apache-2.0" ]
null
null
null
requests/UpdateWorkbookConnectionRequest.py
divinorum-webb/python-tableau-api
9d3f130d63b15307ad2b23e2273b52790b8d9018
[ "Apache-2.0" ]
null
null
null
from .BaseRequest import BaseRequest
36.2
107
0.634131
0a04207ceaf45ab945588b6a283b882bf8a8d0e4
1,116
py
Python
frappe/website/doctype/website_route_meta/test_website_route_meta.py
oryxsolutions/frappe
d193ea22d17ca40d57432040a8afad72287d9e23
[ "MIT" ]
null
null
null
frappe/website/doctype/website_route_meta/test_website_route_meta.py
oryxsolutions/frappe
d193ea22d17ca40d57432040a8afad72287d9e23
[ "MIT" ]
null
null
null
frappe/website/doctype/website_route_meta/test_website_route_meta.py
oryxsolutions/frappe
d193ea22d17ca40d57432040a8afad72287d9e23
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright (c) 2019, Frappe Technologies and Contributors # License: MIT. See LICENSE import unittest import frappe from frappe.utils import set_request from frappe.website.serve import get_response test_dependencies = ["Blog Post"]
27.219512
96
0.689964
0a061597dffbdc657df9899df8da9b8cc5a53c7e
644
py
Python
test/unittests/test_AgRunoff.py
rajadain/gwlf-e
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
[ "Apache-2.0" ]
null
null
null
test/unittests/test_AgRunoff.py
rajadain/gwlf-e
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
[ "Apache-2.0" ]
null
null
null
test/unittests/test_AgRunoff.py
rajadain/gwlf-e
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
[ "Apache-2.0" ]
null
null
null
import numpy as np from .VariableUnitTest import VariableUnitTest from gwlfe.MultiUse_Fxns.Runoff import AgRunoff
37.882353
118
0.630435
0a0650316e52ee5d4a9ff4c95b3303130df01427
3,397
py
Python
lingvo/tasks/car/car_layers_test.py
Harshs27/lingvo
bd396e651488b2e2c4a7416be077b4a0226c87c8
[ "Apache-2.0" ]
2,611
2018-10-16T20:14:10.000Z
2022-03-31T14:48:41.000Z
lingvo/tasks/car/car_layers_test.py
Harshs27/lingvo
bd396e651488b2e2c4a7416be077b4a0226c87c8
[ "Apache-2.0" ]
249
2018-10-27T06:02:29.000Z
2022-03-30T18:00:39.000Z
lingvo/tasks/car/car_layers_test.py
Harshs27/lingvo
bd396e651488b2e2c4a7416be077b4a0226c87c8
[ "Apache-2.0" ]
436
2018-10-25T05:31:45.000Z
2022-03-31T07:26:03.000Z
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for car_layers.""" from lingvo import compat as tf from lingvo.core import py_utils from lingvo.core import test_utils from lingvo.tasks.car import car_layers if __name__ == '__main__': tf.test.main()
40.927711
80
0.637916
0a06508cf532e568943c2d6f9f6d327c4504fc73
56
py
Python
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
rodluger/starry
da7fee48c5ef94278f0047be0579e2f13492cdd5
[ "MIT" ]
116
2018-02-23T19:47:15.000Z
2022-02-21T04:43:46.000Z
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
rodluger/starry
da7fee48c5ef94278f0047be0579e2f13492cdd5
[ "MIT" ]
224
2018-02-26T00:41:51.000Z
2022-03-29T10:38:16.000Z
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
rodluger/starry
da7fee48c5ef94278f0047be0579e2f13492cdd5
[ "MIT" ]
25
2018-02-26T18:14:36.000Z
2021-11-30T01:00:56.000Z
import oblate import numpy as np import pytest # TODO!
9.333333
18
0.767857
0a066d9e3ce3fc69b55dd82dd4922f5e05e9b7a2
2,167
py
Python
take_snapshot.py
ITCave/sniff-for-changes-in-directory
59a06c1ca85033273845e8266038bfeacfc9f64d
[ "MIT" ]
null
null
null
take_snapshot.py
ITCave/sniff-for-changes-in-directory
59a06c1ca85033273845e8266038bfeacfc9f64d
[ "MIT" ]
null
null
null
take_snapshot.py
ITCave/sniff-for-changes-in-directory
59a06c1ca85033273845e8266038bfeacfc9f64d
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # @Filename : take_snapshot.py # @Date : 2019-07-15-13-44 # @Project: ITC-sniff-for-changes-in-directory # @Author: Piotr Wooszyn # @Website: http://itcave.eu # @Email: [email protected] # @License: MIT # @Copyright (C) 2019 ITGO Piotr Wooszyn # Generic imports import os import pickle import re import argparse from datetime import datetime def clear_path_string(s): """ Simple function that removes chars that are not allowed in file names :param s: path_string :return: cleaned_path_string """ return (re.sub('[^a-zA-Z]+', '#', s)).lower() def sniff(sniff_path): """ Walks the path and stores information about directory content :param sniff_path: relative or absolute path :return: void """ sniff_path = str(sniff_path).lower() # Variable in which information will be stored dir_store = {} # Recursive loop that walks through all of the subdirectories for subdir, dirs, files in os.walk(sniff_path): if subdir not in dir_store: dir_store[subdir] = {} dir_store[subdir]['subdirs'] = dirs dir_store[subdir]['files'] = files dir_store[subdir]['file_details'] = {} for file in files: f_path = os.path.join(subdir, file) # The information that will be store for each of the files - in this case last file modification date # Important: it's cross-platform relevant! modified_date = os.path.getmtime(f_path) dir_store[subdir]['file_details'][file] = (modified_date,) # Name of a file in which data will be stored dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # Save pickled data with open(dump_name + '.pkl', 'wb') as output: pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL) print("Directory Snapshot taken:", dump_name) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Directory Sniffer') parser.add_argument('path', help='Path to the directory that you want to take a snapshot of') args = parser.parse_args() sniff(args.path)
28.513158
113
0.662206
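sniff() in the record above only writes a pickled snapshot; the project's stated purpose is to detect changes between snapshots. The helper below is a rough sketch of how two such pickles could be compared (the function and its report format are assumptions, not part of the original script; the dictionary keys follow the structure that sniff() builds):

import os
import pickle

def compare_snapshots(old_pkl, new_pkl):
    # Load two snapshots produced by sniff() and report added/removed/changed files.
    with open(old_pkl, 'rb') as f:
        old = pickle.load(f)
    with open(new_pkl, 'rb') as f:
        new = pickle.load(f)
    for subdir in sorted(set(old) | set(new)):
        old_files = old.get(subdir, {}).get('file_details', {})
        new_files = new.get(subdir, {}).get('file_details', {})
        for name in sorted(set(old_files) | set(new_files)):
            path = os.path.join(subdir, name)
            if name not in old_files:
                print('ADDED  ', path)
            elif name not in new_files:
                print('REMOVED', path)
            elif old_files[name] != new_files[name]:
                print('CHANGED', path)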
0a0800535a188f21223ec11106f263b7159026d7
7,221
py
Python
nuitka/nodes/GlobalsLocalsNodes.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-04-13T18:56:02.000Z
2020-04-13T18:56:02.000Z
nuitka/nodes/GlobalsLocalsNodes.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
1
2020-07-11T17:53:56.000Z
2020-07-11T17:53:56.000Z
nuitka/nodes/GlobalsLocalsNodes.py
juanfra684/Nuitka
0e276895fadabefb598232f2ccf8cc7736c9a85b
[ "Apache-2.0" ]
null
null
null
# Copyright 2020, Kay Hayen, mailto:[email protected] # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Globals/locals/single arg dir nodes These nodes give access to variables, highly problematic, because using them, the code may change or access anything about them, so nothing can be trusted anymore, if we start to not know where their value goes. The "dir()" call without arguments is reformulated to locals or globals calls. """ from .ConstantRefNodes import makeConstantRefNode from .DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef
34.222749
102
0.655034
0a096c14b2ddf561ce6b9429ac126077a454bd8e
6,298
py
Python
tests/chainerx_tests/unit_tests/test_scalar.py
yuhonghong66/chainer
15d475f54fc39587abd7264808c5e4b33782df9e
[ "MIT" ]
1
2019-02-12T23:10:16.000Z
2019-02-12T23:10:16.000Z
tests/chainerx_tests/unit_tests/test_scalar.py
nolfwin/chainer
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
[ "MIT" ]
2
2019-05-14T15:45:01.000Z
2019-05-15T07:12:49.000Z
tests/chainerx_tests/unit_tests/test_scalar.py
nolfwin/chainer
8d776fcc1e848cb9d3800a6aab356eb91ae9d088
[ "MIT" ]
1
2018-05-28T22:43:34.000Z
2018-05-28T22:43:34.000Z
import math import pytest import chainerx all_scalar_values = [ -2, 1, -1.5, 2.3, True, False, float('inf'), float('nan')] def test_init_invalid(): with pytest.raises(TypeError): chainerx.Scalar("1") # string, which is not a numeric
27.991111
74
0.634963
0a0991a62637e4100b857f9f5423321dcccd74d3
8,265
py
Python
app.py
Tiemoue/SnakeGame
69124d38227502928924cc7dc6c57b41ade5d97c
[ "Apache-2.0" ]
null
null
null
app.py
Tiemoue/SnakeGame
69124d38227502928924cc7dc6c57b41ade5d97c
[ "Apache-2.0" ]
null
null
null
app.py
Tiemoue/SnakeGame
69124d38227502928924cc7dc6c57b41ade5d97c
[ "Apache-2.0" ]
null
null
null
import sys import pygame from app_window import App_window from button import Button from snake import Snake from food import Food from settings import WIDTH, HEIGHT, FONT, BG_COL, QUIT_BUTTON_COLOUR, PLAY_BUTTON_COLOUR, BLACK, FPS, RED
35.320513
105
0.461101
0a0a3ed9310efb11ad8dbed4a513b033dd037f31
4,697
py
Python
pupa/importers/bills.py
datamade/pupa
7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4
[ "BSD-3-Clause" ]
3
2015-11-21T10:39:44.000Z
2019-11-17T16:34:53.000Z
pupa/importers/bills.py
datamade/pupa
7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4
[ "BSD-3-Clause" ]
1
2015-11-23T19:43:48.000Z
2015-11-23T19:45:06.000Z
pupa/importers/bills.py
datamade/pupa
7c7d2937dfa0c8347e47661a6ed42fd28a9e16d4
[ "BSD-3-Clause" ]
5
2015-11-22T09:23:14.000Z
2019-11-17T16:34:57.000Z
from pupa.utils import fix_bill_id from opencivicdata.legislative.models import (Bill, RelatedBill, BillAbstract, BillTitle, BillIdentifier, BillAction, BillActionRelatedEntity, BillSponsorship, BillSource, BillDocument, BillVersion, BillDocumentLink, BillVersionLink) from .base import BaseImporter from ..exceptions import PupaInternalError
48.42268
99
0.569087
0a0a44a50d18d71919437c3d704deabe9ca4ee0b
2,099
py
Python
utilities/classify_ensemble.py
Hazel1994/Paraphrase-detection-on-Quora-and-MSRP
bd464d0d16c57d5f95d91b78add6d9720128826a
[ "MIT" ]
2
2019-09-09T05:12:16.000Z
2019-11-25T06:02:13.000Z
utilities/classify_ensemble.py
Hazel1994/Paraphrase-detection-on-Quora-and-MSRP
bd464d0d16c57d5f95d91b78add6d9720128826a
[ "MIT" ]
null
null
null
utilities/classify_ensemble.py
Hazel1994/Paraphrase-detection-on-Quora-and-MSRP
bd464d0d16c57d5f95d91b78add6d9720128826a
[ "MIT" ]
1
2019-09-22T14:51:38.000Z
2019-09-22T14:51:38.000Z
from sklearn.metrics import f1_score,accuracy_score import numpy as np from utilities.tools import load_model import pandas as pd
31.80303
88
0.685088
0a0ae7fb6e8c16bf95848129bac7852b529505c4
6,799
py
Python
koino/plot/clusters.py
tritas/koino
21ecc30fdb76727b9b4b3cf695a39f6e860a52d6
[ "BSD-3-Clause" ]
null
null
null
koino/plot/clusters.py
tritas/koino
21ecc30fdb76727b9b4b3cf695a39f6e860a52d6
[ "BSD-3-Clause" ]
null
null
null
koino/plot/clusters.py
tritas/koino
21ecc30fdb76727b9b4b3cf695a39f6e860a52d6
[ "BSD-3-Clause" ]
null
null
null
# coding=utf-8 import logging import traceback from os import makedirs from os.path import exists, join from textwrap import fill import matplotlib.patheffects as PathEffects import matplotlib.pyplot as plt import numpy as np import seaborn as sns from koino.plot import big_square, default_alpha from matplotlib import cm from ..utils.base import jaccard def plot_cluster_assignments( X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title="" ): """Clustering assignments scatter plot Notes ----- Can use mean or median to fix cluster centroid coordinates.""" if cluster_names is None: cluster_names = ["Cluster {}".format(i + 1) for i in range(n_clusters)] # We first reorder the data points according to the centroids labels X = np.vstack([X[y == i] for i in range(n_clusters)]) y = np.hstack([y[y == i] for i in range(n_clusters)]) # Choose a color palette with seaborn. palette = np.array(sns.color_palette("hls", n_clusters)) fig, ax = plt.subplots(figsize=big_square) # for i in range(n_clusters): # mask = y == i # ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i], # label=cluster_names[i]) ax.set_title(title) ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)]) ax.axis("off") # Add the labels for each cluster. for i in range(n_clusters): # Position of each label. samples = np.atleast_2d(X[y == i, :2]) if not len(samples): logging.warning( "Probably singular cluster {} (shape:{})".format(i + 1, X[y == i].shape) ) continue xtext, ytext = np.median(samples, axis=0) name = fill(cluster_names[i], width=20) assert np.isfinite(xtext) assert np.isfinite(ytext) txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha="left") txt.set_path_effects( [PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()] ) # plt.legend() figure_fp = join(figures_dir, "Clustered {}.png".format(title)) fig.tight_layout() try: fig.savefig(figure_fp, transparent=transparent) except ValueError: logging.warning(traceback.format_exc()) finally: plt.close() plt.clf() def overlap_jaccard( indx, y_a, y_b, names_a, names_b, n_a=None, n_b=None, figsize=None, output_dir=None, alabel="socio-demographic", blabel="purchases", transparent=False, ): """Compute and plot contingency tables based on set intersection and jaccard score. # TODO: Normaliser par len(sd_set) ou len(diet_set) ? 
""" if not (n_a or n_b) or not output_dir: return elif output_dir and not exists(output_dir): makedirs(output_dir) else: assert n_a and n_b assert len(indx) == len(y_a) == len(y_b) assert len(names_a) == n_a assert len(names_b) == n_b a_sets = [set(indx[y_a == i]) for i in range(n_a)] b_sets = [set(indx[y_b == i]) for i in range(n_b)] inter_sets = np.asarray( [[len(set_a & set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_ ) fig, ax = plt.subplots(figsize=figsize) plt.title("Overlap between {} and {} clusters".format(alabel, blabel)) sns.heatmap( inter_sets, annot=True, fmt="6.0f", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() inter_path = join(output_dir, "Clusters Intersection.png") plt.savefig(inter_path, transparent=transparent) plt.close() plt.clf() jac_arr = np.asarray( [[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets], dtype=np.float_, ) fig, ax = plt.subplots(figsize=figsize) plt.title("Jaccard scores between {} and {} clusters".format(alabel, blabel)) sns.heatmap( jac_arr, annot=True, fmt=".3f", ax=ax, square=True, xticklabels=names_a, yticklabels=names_b, ) plt.tight_layout() jaccard_path = join(output_dir, "Clusters Jaccard.png") plt.savefig(jaccard_path, transparent=transparent) plt.close() plt.clf()
31.188073
88
0.633034
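overlap_jaccard in the record above imports jaccard from ..utils.base, which is not shown here. Judging from how it is called on two sets, it presumably computes the standard Jaccard index (size of the intersection divided by size of the union); a minimal stand-in, offered as an assumption rather than the koino implementation:

def jaccard(set_a, set_b):
    # Jaccard similarity of two sets; defined as 0.0 when both sets are empty.
    union = set_a | set_b
    if not union:
        return 0.0
    return len(set_a & set_b) / len(union)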
0a0c72972354861b109e6305d555a377963ca24f
63
py
Python
python/testData/stubs/FullyQualifiedTypingNamedTuple.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/stubs/FullyQualifiedTypingNamedTuple.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/stubs/FullyQualifiedTypingNamedTuple.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
import typing nt = typing.NamedTuple("name", [("field", str)])
21
48
0.666667
0a0e5c306cd6cb5140e3d9096d9aec435b5e905a
637
py
Python
src/plat/index_news_remove.py
jack139/cnnc
c32611ec01af50bedb67dcd4c8a28e4b0c7a9aef
[ "BSD-2-Clause" ]
null
null
null
src/plat/index_news_remove.py
jack139/cnnc
c32611ec01af50bedb67dcd4c8a28e4b0c7a9aef
[ "BSD-2-Clause" ]
null
null
null
src/plat/index_news_remove.py
jack139/cnnc
c32611ec01af50bedb67dcd4c8a28e4b0c7a9aef
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # import web import time from bson.objectid import ObjectId from config import setting import helper db = setting.db_web # url = ('/plat/index_news_remove')
19.90625
69
0.629513
0a0fca50d08846d8ef07b169b960d9c55f0826dc
3,504
py
Python
esppy/windows/score.py
PetreStegaroiu/python-esppy
d43781e94ad9236916901eeb3737d0b1b18d797a
[ "Apache-2.0" ]
null
null
null
esppy/windows/score.py
PetreStegaroiu/python-esppy
d43781e94ad9236916901eeb3737d0b1b18d797a
[ "Apache-2.0" ]
null
null
null
esppy/windows/score.py
PetreStegaroiu/python-esppy
d43781e94ad9236916901eeb3737d0b1b18d797a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # encoding: utf-8 # # Copyright SAS Institute # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function, division, absolute_import, unicode_literals import os import pandas as pd import six from .base import BaseWindow, attribute from .features import SchemaFeature, ModelsFeature, ConnectorsFeature from .utils import get_args, ensure_element
31.854545
90
0.656393
0a10152195fb9a20741a86fb44035860fed300f4
12,017
py
Python
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Packs/Pwned/Integrations/PwnedV2/PwnedV2.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
from CommonServerPython import * ''' IMPORTS ''' import re import requests # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' VENDOR = 'Have I Been Pwned? V2' MAX_RETRY_ALLOWED = demisto.params().get('max_retry_time', -1) API_KEY = demisto.params().get('api_key') USE_SSL = not demisto.params().get('insecure', False) BASE_URL = 'https://haveibeenpwned.com/api/v3' HEADERS = { 'hibp-api-key': API_KEY, 'user-agent': 'DBOT-API', 'Content-Type': 'application/json', 'Accept': 'application/json' } DEFAULT_DBOT_SCORE_EMAIL = 2 if demisto.params().get('default_dbot_score_email') == 'SUSPICIOUS' else 3 DEFAULT_DBOT_SCORE_DOMAIN = 2 if demisto.params().get('default_dbot_score_domain') == 'SUSPICIOUS' else 3 SUFFIXES = { "email": '/breachedaccount/', "domain": '/breaches?domain=', "username": '/breachedaccount/', "paste": '/pasteaccount/', "email_truncate_verified": '?truncateResponse=false&includeUnverified=true', "domain_truncate_verified": '&truncateResponse=false&includeUnverified=true', "username_truncate_verified": '?truncateResponse=false&includeUnverified=true' } RETRIES_END_TIME = datetime.min ''' HELPER FUNCTIONS ''' def html_description_to_human_readable(breach_description): """ Converting from html description to hr :param breach_description: Description of breach from API response :return: Description string that altered HTML urls to clickable urls for better readability in war-room """ html_link_pattern = re.compile('<a href="(.+?)"(.+?)>(.+?)</a>') patterns_found = html_link_pattern.findall(breach_description) for link in patterns_found: html_actual_address = link[0] html_readable_name = link[2] link_from_desc = '[' + html_readable_name + ']' + '(' + html_actual_address + ')' breach_description = re.sub(html_link_pattern, link_from_desc, breach_description, count=1) return breach_description ''' COMMANDS + REQUESTS FUNCTIONS ''' def test_module(args_dict): """ If the http request was successful the test will return OK :return: 3 arrays of outputs """ http_request('GET', SUFFIXES.get("username", '') + 'test') return ['ok'], [None], [None] def pwned_email_command(args_dict): """ Executing the pwned request for emails list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the email list is needed :return: 3 arrays of outputs """ email_list = argToList(args_dict.get('email', '')) api_email_res_list, api_paste_res_list = pwned_email(email_list) md_list = [] ec_list = [] for email, api_email_res, api_paste_res in zip(email_list, api_email_res_list, api_paste_res_list): md_list.append(data_to_markdown('Email', email, api_email_res, api_paste_res)) ec_list.append(email_to_entry_context(email, api_email_res or [], api_paste_res or [])) return md_list, ec_list, api_email_res_list def pwned_email(email_list): """ Executing the http requests :param email_list: the email list that needed for the http requests :return: 2 arrays of http requests outputs """ api_email_res_list = [] api_paste_res_list = [] for email in email_list: email_suffix = SUFFIXES.get("email") + email + SUFFIXES.get("email_truncate_verified") paste_suffix = SUFFIXES.get("paste") + email api_email_res_list.append(http_request('GET', url_suffix=email_suffix)) api_paste_res_list.append(http_request('GET', url_suffix=paste_suffix)) return api_email_res_list, api_paste_res_list def pwned_domain_command(args_dict): """ Executing the pwned request for domains list, in order to support list input, the function returns 
3 lists of outputs :param args_dict: the demisto argument - in this case the domain list is needed :return: 3 arrays of outputs """ domain_list = argToList(args_dict.get('domain', '')) api_res_list = pwned_domain(domain_list) md_list = [] ec_list = [] for domain, api_res in zip(domain_list, api_res_list): md_list.append(data_to_markdown('Domain', domain, api_res)) ec_list.append(domain_to_entry_context(domain, api_res or [])) return md_list, ec_list, api_res_list def pwned_domain(domain_list): """ Executing the http request :param domain_list: the domains list that needed for the http requests :return: an array of http requests outputs """ api_res_list = [] for domain in domain_list: suffix = SUFFIXES.get("domain") + domain + SUFFIXES.get("domain_truncate_verified") api_res_list.append(http_request('GET', url_suffix=suffix)) return api_res_list def pwned_username_command(args_dict): """ Executing the pwned request for usernames list, in order to support list input, the function returns 3 lists of outputs :param args_dict: the demisto argument - in this case the username list is needed :return: 3 arrays of outputs """ username_list = argToList(args_dict.get('username', '')) api_res_list = pwned_username(username_list) md_list = [] ec_list = [] for username, api_res in zip(username_list, api_res_list): md_list.append(data_to_markdown('Username', username, api_res)) ec_list.append(domain_to_entry_context(username, api_res or [])) return md_list, ec_list, api_res_list def pwned_username(username_list): """ Executing the http request :param username_list: the username list that needed for the http requests :return: an array of http requests outputs """ api_res_list = [] for username in username_list: suffix = SUFFIXES.get("username") + username + SUFFIXES.get("username_truncate_verified") api_res_list.append(http_request('GET', url_suffix=suffix)) return api_res_list command = demisto.command() LOG('Command being called is: {}'.format(command)) try: handle_proxy() set_retry_end_time() commands = { 'test-module': test_module, 'email': pwned_email_command, 'pwned-email': pwned_email_command, 'domain': pwned_domain_command, 'pwned-domain': pwned_domain_command, 'pwned-username': pwned_username_command } if command in commands: md_list, ec_list, api_email_res_list = commands[command](demisto.args()) for md, ec, api_paste_res in zip(md_list, ec_list, api_email_res_list): return_outputs(md, ec, api_paste_res) # Log exceptions except Exception as e: return_error(str(e))
34.042493
120
0.659732
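A short usage example for html_description_to_human_readable defined in the record above; the breach description string is invented for illustration and assumes the helper is in scope:

description = ('In 2015 the forum <a href="https://example.com" rel="noopener">Example Forum</a> '
               'suffered a data breach.')
print(html_description_to_human_readable(description))
# In 2015 the forum [Example Forum](https://example.com) suffered a data breach.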
0a1109b1ce78a5e3058c1f4aa17021228f40ef11
817
py
Python
moshmosh/extensions/pipelines.py
Aloxaf/moshmosh
0cef4e3e574adabc7821a657bceba1254ca20f99
[ "MIT" ]
114
2019-07-12T19:00:20.000Z
2021-12-02T17:28:36.000Z
moshmosh/extensions/pipelines.py
Aloxaf/moshmosh
0cef4e3e574adabc7821a657bceba1254ca20f99
[ "MIT" ]
19
2019-07-12T18:34:59.000Z
2022-01-01T03:37:03.000Z
moshmosh/extensions/pipelines.py
Aloxaf/moshmosh
0cef4e3e574adabc7821a657bceba1254ca20f99
[ "MIT" ]
7
2019-07-14T23:15:44.000Z
2021-12-27T21:15:17.000Z
from moshmosh.extension import Extension from moshmosh.ast_compat import ast
27.233333
71
0.597307
0a1121422d09eb0d72dfd59abaf853f521226d5b
3,641
py
Python
postpatch.py
mr-ma/basic-self-checksumming
ce3a0306fd96cc54476266bbf612d54201d2b46a
[ "MIT" ]
1
2020-11-25T21:54:28.000Z
2020-11-25T21:54:28.000Z
postpatch.py
mr-ma/basic-self-checksumming
ce3a0306fd96cc54476266bbf612d54201d2b46a
[ "MIT" ]
null
null
null
postpatch.py
mr-ma/basic-self-checksumming
ce3a0306fd96cc54476266bbf612d54201d2b46a
[ "MIT" ]
null
null
null
import argparse import os import r2pipe import struct import mmap import base64 from shutil import copyfile import pprint pp = pprint.PrettyPrinter(indent=4) if __name__ == '__main__': main()
35.009615
124
0.649272
0a114ea68c2fa1e2738f0d3ff99019e72e2ea941
1,074
py
Python
sitewebapp/migrations/0011_auto_20210130_0150.py
deucaleon18/debsoc-nitdgp-website
41bd6ade7f4af143ef34aff01848f830cc533add
[ "MIT" ]
2
2020-12-05T05:34:56.000Z
2020-12-09T10:27:43.000Z
sitewebapp/migrations/0011_auto_20210130_0150.py
deucaleon18/debsoc-nitdgp-website
41bd6ade7f4af143ef34aff01848f830cc533add
[ "MIT" ]
3
2021-06-28T16:47:23.000Z
2021-06-28T16:48:51.000Z
sitewebapp/migrations/0011_auto_20210130_0150.py
deucaleon18/debsoc-nitdgp-website
41bd6ade7f4af143ef34aff01848f830cc533add
[ "MIT" ]
9
2021-01-29T17:06:30.000Z
2021-08-21T18:23:26.000Z
# Generated by Django 2.2.15 on 2021-01-29 20:20 from django.db import migrations, models import django.db.models.deletion
34.645161
148
0.634078
0a12c052ef27cc1782214e2d795d2be846ea918a
6,420
py
Python
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
1
2020-01-22T13:11:23.000Z
2020-01-22T13:11:23.000Z
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
12
2020-02-21T07:24:52.000Z
2020-04-14T09:54:32.000Z
venv/lib/python3.6/site-packages/ansible_collections/community/azure/plugins/modules/azure_rm_availabilityset_info.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2016, Julien Stroheker <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_availabilityset_info short_description: Get Azure Availability Set facts description: - Get facts for a specific availability set or all availability sets. options: name: description: - Limit results to a specific availability set. resource_group: description: - The resource group to search for the desired availability set. tags: description: - List of tags to be matched. extends_documentation_fragment: - azure.azcollection.azure author: - Julien Stroheker (@julienstroheker) deprecated: removed_in: '2.0.0' why: The Ansible collection community.azure is deprecated. Use azure.azcollection instead. alternative: Use M(azure.azcollection.azure_rm_availabilityset_info) instead. ''' EXAMPLES = ''' - name: Get facts for one availability set community.azure.azure_rm_availabilityset_info: name: Testing resource_group: myResourceGroup - name: Get facts for all availability sets in a specific resource group community.azure.azure_rm_availabilityset_info: resource_group: myResourceGroup ''' RETURN = ''' azure_availabilityset: description: List of availability sets dicts. returned: always type: complex contains: location: description: - Location where the resource lives. type: str sample: eastus2 name: description: - Resource name. type: str sample: myAvailabilitySet properties: description: - The properties of the resource. type: dict contains: platformFaultDomainCount: description: - Fault Domain count. type: int sample: 3 platformUpdateDomainCount: description: - Update Domain count. type: int sample: 2 virtualMachines: description: - A list of references to all virtualmachines in the availability set. type: list sample: [] sku: description: - Location where the resource lives. type: str sample: Aligned type: description: - Resource type. type: str sample: "Microsoft.Compute/availabilitySets" tags: description: - Resource tags. type: dict sample: { env: sandbox } ''' from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase try: from msrestazure.azure_exceptions import CloudError except Exception: # handled in azure_rm_common pass AZURE_OBJECT_CLASS = 'AvailabilitySet' def main(): """Main module execution code path""" AzureRMAvailabilitySetInfo() if __name__ == '__main__': main()
28.789238
132
0.588006
0a1466d8bab50ddcdbbd51b7ac94f3df778f4c3c
40,433
py
Python
tests/v3_api/common.py
sowmyav27/rancher
a277d958cfcafca22f5da26b3a4582edd9cfd2af
[ "Apache-2.0" ]
null
null
null
tests/v3_api/common.py
sowmyav27/rancher
a277d958cfcafca22f5da26b3a4582edd9cfd2af
[ "Apache-2.0" ]
null
null
null
tests/v3_api/common.py
sowmyav27/rancher
a277d958cfcafca22f5da26b3a4582edd9cfd2af
[ "Apache-2.0" ]
null
null
null
import inspect import json import os import random import subprocess import time import requests import ast import paramiko import rancher from rancher import ApiError from lib.aws import AmazonWebServices DEFAULT_TIMEOUT = 120 DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300 CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "http://localhost:80") ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None") CATTLE_API_URL = CATTLE_TEST_URL + "/v3" kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)), "k8s_kube_config") MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200")) TEST_IMAGE = "sangeetha/mytestcontainer" CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "") RANCHER_CLEANUP_CLUSTER = \ ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True")) env_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "rancher_env.config") CLUSTER_NAME_2 = ""
34.946413
113
0.635471
0a148b5d990f7bb1b408caafa5a8cdf6862a40c6
1,195
py
Python
LeetCode/Python3/String/20. Valid Parentheses.py
WatsonWangZh/CodingPractice
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
[ "MIT" ]
11
2019-09-01T22:36:00.000Z
2021-11-08T08:57:20.000Z
LeetCode/Python3/String/20. Valid Parentheses.py
WatsonWangZh/LeetCodePractice
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
[ "MIT" ]
null
null
null
LeetCode/Python3/String/20. Valid Parentheses.py
WatsonWangZh/LeetCodePractice
dc057dd6ea2fc2034e14fd73e07e73e6364be2ae
[ "MIT" ]
2
2020-05-27T14:58:52.000Z
2020-05-27T15:04:17.000Z
# Given a string containing just the characters '(', ')', '{', '}', '[' and ']', # determine if the input string is valid. # An input string is valid if: # Open brackets must be closed by the same type of brackets. # Open brackets must be closed in the correct order. # Note that an empty string is also considered valid. # Example 1: # Input: "()" # Output: true # Example 2: # Input: "()[]{}" # Output: true # Example 3: # Input: "(]" # Output: false # Example 4: # Input: "([)]" # Output: false # Example 5: # Input: "{[]}" # Output: true if __name__ == "__main__": main()
21.727273
81
0.512134
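The record above contains the problem statement and the __main__ guard, but the solution body is not part of this excerpt. The classic stack-based check is sketched below as an illustration (the function name is an assumption):

def is_valid(s):
    # Each closing bracket must match the most recently opened, still-unclosed bracket.
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert is_valid("") and is_valid("()[]{}") and is_valid("{[]}")
assert not is_valid("(]") and not is_valid("([)]")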
0a14fdb015437094dc2620963de3edb83ccea376
1,706
py
Python
backend/ibutsu_server/controllers/health_controller.py
rsnyman/ibutsu-server
3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc
[ "MIT" ]
10
2020-07-07T07:00:00.000Z
2022-03-30T12:21:44.000Z
backend/ibutsu_server/controllers/health_controller.py
rsnyman/ibutsu-server
3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc
[ "MIT" ]
133
2020-07-06T20:10:45.000Z
2022-03-31T15:19:19.000Z
backend/ibutsu_server/controllers/health_controller.py
rsnyman/ibutsu-server
3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc
[ "MIT" ]
9
2020-07-06T17:33:29.000Z
2022-03-07T00:08:00.000Z
from flask import current_app from sqlalchemy.exc import InterfaceError from sqlalchemy.exc import OperationalError try: from ibutsu_server.db.model import Result IS_CONNECTED = True except ImportError: IS_CONNECTED = False def get_health(token_info=None, user=None): """Get a health report :rtype: Health """ return {"status": "OK", "message": "Service is running"} def get_database_health(token_info=None, user=None): """Get a health report for the database :rtype: Health """ response = ({"status": "Pending", "message": "Fetching service status"}, 200) # Try to connect to the database, and handle various responses try: if not IS_CONNECTED: response = ({"status": "Error", "message": "Incomplete database configuration"}, 500) else: Result.query.first() response = ({"status": "OK", "message": "Service is running"}, 200) except OperationalError: response = ({"status": "Error", "message": "Unable to connect to the database"}, 500) except InterfaceError: response = ({"status": "Error", "message": "Incorrect connection configuration"}, 500) except Exception as e: response = ({"status": "Error", "message": str(e)}, 500) return response def get_health_info(token_info=None, user=None): """Get the information about this server :rtype: HealthInfo """ return { "frontend": current_app.config.get("FRONTEND_URL", "http://localhost:3000"), "backend": current_app.config.get("BACKEND_URL", "http://localhost:8080"), "api_ui": current_app.config.get("BACKEND_URL", "http://localhost:8080") + "/api/ui/", }
32.188679
97
0.649472
0a14ffa87c6cf8cc2785c57c735fc9bf74a8348d
9,200
py
Python
src/python/tsnecuda/TSNE.py
rappdw/tsne-cuda
1249948704b0ae1847ebe614801f8a326050b0f4
[ "BSD-3-Clause" ]
1
2019-11-06T21:56:26.000Z
2019-11-06T21:56:26.000Z
src/python/tsnecuda/TSNE.py
amitadate/tsne-cuda
efa209834879bba88814e74d7062539f4de07cc2
[ "BSD-3-Clause" ]
null
null
null
src/python/tsnecuda/TSNE.py
amitadate/tsne-cuda
efa209834879bba88814e74d7062539f4de07cc2
[ "BSD-3-Clause" ]
null
null
null
"""Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors Refs: References [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html """ import numpy as N import ctypes import os import pkg_resources
42.790698
135
0.595978
0a15bb92f32c4317216e7f1662783bb4852671eb
105
py
Python
school/admin/__init__.py
leyyin/university-SE
7cc3625bda787d2e79ab22f30d6f6e732ca9abb3
[ "MIT" ]
3
2015-03-12T15:50:58.000Z
2015-05-04T12:55:19.000Z
school/admin/__init__.py
leyyin/university-SE
7cc3625bda787d2e79ab22f30d6f6e732ca9abb3
[ "MIT" ]
2
2015-05-01T18:24:04.000Z
2015-05-15T15:58:47.000Z
school/admin/__init__.py
leyyin/university-SE
7cc3625bda787d2e79ab22f30d6f6e732ca9abb3
[ "MIT" ]
null
null
null
# contains any CRUD not related to strictly editing users info and courses info from .views import admin
35
79
0.809524
0a1872d6c1f83595585a8fcb3b624041de25bbab
22,787
py
Python
python/helpers/pydev/pydevd_file_utils.py
kirmerzlikin/intellij-community
b5f5b5f38904b32c459203633e4ea17dc2736827
[ "Apache-2.0" ]
1
2019-08-02T21:11:19.000Z
2019-08-02T21:11:19.000Z
python/helpers/pydev/pydevd_file_utils.py
kirmerzlikin/intellij-community
b5f5b5f38904b32c459203633e4ea17dc2736827
[ "Apache-2.0" ]
null
null
null
python/helpers/pydev/pydevd_file_utils.py
kirmerzlikin/intellij-community
b5f5b5f38904b32c459203633e4ea17dc2736827
[ "Apache-2.0" ]
null
null
null
r''' This module provides utilities to get the absolute filenames so that we can be sure that: - The case of a file will match the actual file in the filesystem (otherwise breakpoints won't be hit). - Providing means for the user to make path conversions when doing a remote debugging session in one machine and debugging in another. To do that, the PATHS_FROM_ECLIPSE_TO_PYTHON constant must be filled with the appropriate paths. @note: in this context, the server is where your python process is running and the client is where eclipse is running. E.g.: If the server (your python process) has the structure /user/projects/my_project/src/package/module1.py and the client has: c:\my_project\src\package\module1.py the PATHS_FROM_ECLIPSE_TO_PYTHON would have to be: PATHS_FROM_ECLIPSE_TO_PYTHON = [(r'c:\my_project\src', r'/user/projects/my_project/src')] alternatively, this can be set with an environment variable from the command line: set PATHS_FROM_ECLIPSE_TO_PYTHON=[['c:\my_project\src','/user/projects/my_project/src']] @note: DEBUG_CLIENT_SERVER_TRANSLATION can be set to True to debug the result of those translations @note: the case of the paths is important! Note that this can be tricky to get right when one machine uses a case-independent filesystem and the other uses a case-dependent filesystem (if the system being debugged is case-independent, 'normcase()' should be used on the paths defined in PATHS_FROM_ECLIPSE_TO_PYTHON). @note: all the paths with breakpoints must be translated (otherwise they won't be found in the server) @note: to enable remote debugging in the target machine (pydev extensions in the eclipse installation) import pydevd;pydevd.settrace(host, stdoutToServer, stderrToServer, port, suspend) see parameter docs on pydevd.py @note: for doing a remote debugging session, all the pydevd_ files must be on the server accessible through the PYTHONPATH (and the PATHS_FROM_ECLIPSE_TO_PYTHON only needs to be set on the target machine for the paths that'll actually have breakpoints). ''' from _pydevd_bundle.pydevd_constants import IS_PY2, IS_PY3K, DebugInfoHolder, IS_WINDOWS, IS_JYTHON from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding import json import os.path import sys import traceback _os_normcase = os.path.normcase basename = os.path.basename exists = os.path.exists join = os.path.join try: rPath = os.path.realpath # @UndefinedVariable except: # jython does not support os.path.realpath # realpath is a no-op on systems without islink support rPath = os.path.abspath # defined as a list of tuples where the 1st element of the tuple is the path in the client machine # and the 2nd element is the path in the server machine. # see module docstring for more details. 
try: PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]')) except Exception: sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n') traceback.print_exc() PATHS_FROM_ECLIPSE_TO_PYTHON = [] else: if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list): sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n') PATHS_FROM_ECLIPSE_TO_PYTHON = [] else: # Converting json lists to tuple PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON] # example: # PATHS_FROM_ECLIPSE_TO_PYTHON = [ # (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy', # r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx') # ] convert_to_long_pathname = lambda filename:filename convert_to_short_pathname = lambda filename:filename get_path_with_real_case = lambda filename:filename if sys.platform == 'win32': try: import ctypes from ctypes.wintypes import MAX_PATH, LPCWSTR, LPWSTR, DWORD GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW GetLongPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] GetLongPathName.restype = DWORD GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW GetShortPathName.argtypes = [LPCWSTR, LPWSTR, DWORD] GetShortPathName.restype = DWORD # Check that it actually works _get_path_with_real_case(__file__) except: # Something didn't quite work out, leave no-op conversions in place. if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2: traceback.print_exc() else: convert_to_long_pathname = _convert_to_long_pathname convert_to_short_pathname = _convert_to_short_pathname get_path_with_real_case = _get_path_with_real_case elif IS_JYTHON and IS_WINDOWS: if IS_WINDOWS: if IS_JYTHON: else: else: _ide_os = 'WINDOWS' if IS_WINDOWS else 'UNIX' def set_ide_os(os): ''' We need to set the IDE os because the host where the code is running may be actually different from the client (and the point is that we want the proper paths to translate from the client to the server). :param os: 'UNIX' or 'WINDOWS' ''' global _ide_os prev = _ide_os if os == 'WIN': # Apparently PyCharm uses 'WIN' (https://github.com/fabioz/PyDev.Debugger/issues/116) os = 'WINDOWS' assert os in ('WINDOWS', 'UNIX') if prev != os: _ide_os = os # We need to (re)setup how the client <-> server translation works to provide proper separators. setup_client_server_paths(_last_client_server_paths_set) DEBUG_CLIENT_SERVER_TRANSLATION = os.environ.get('DEBUG_PYDEVD_PATHS_TRANSLATION', 'False').lower() in ('1', 'true') # Caches filled as requested during the debug session. NORM_PATHS_CONTAINER = {} NORM_PATHS_AND_BASE_CONTAINER = {} # Returns tuple of absolute path and real path for given filename _ZIP_SEARCH_CACHE = {} _NOT_FOUND_SENTINEL = object() # Now, let's do a quick test to see if we're working with a version of python that has no problems # related to the names generated... 
try: try: code = rPath.func_code except AttributeError: code = rPath.__code__ if not exists(_NormFile(code.co_filename)): sys.stderr.write('-------------------------------------------------------------------------------\n') sys.stderr.write('pydev debugger: CRITICAL WARNING: This version of python seems to be incorrectly compiled (internal generated filenames are not absolute)\n') sys.stderr.write('pydev debugger: The debugger may still function, but it will work slower and may miss breakpoints.\n') sys.stderr.write('pydev debugger: Related bug: http://bugs.python.org/issue1666807\n') sys.stderr.write('-------------------------------------------------------------------------------\n') sys.stderr.flush() NORM_SEARCH_CACHE = {} initial_norm_paths = _NormPaths except: # Don't fail if there's something not correct here -- but at least print it to the user so that we can correct that traceback.print_exc() # Note: as these functions may be rebound, users should always import # pydevd_file_utils and then use: # # pydevd_file_utils.norm_file_to_client # pydevd_file_utils.norm_file_to_server # # instead of importing any of those names to a given scope. _original_file_to_server = _NormFile norm_file_to_client = _original_file_to_client norm_file_to_server = _original_file_to_server _last_client_server_paths_set = [] def setup_client_server_paths(paths): '''paths is the same format as PATHS_FROM_ECLIPSE_TO_PYTHON''' global norm_file_to_client global norm_file_to_server global _last_client_server_paths_set _last_client_server_paths_set = paths[:] # Work on the client and server slashes. python_sep = '\\' if IS_WINDOWS else '/' eclipse_sep = '\\' if _ide_os == 'WINDOWS' else '/' norm_filename_to_server_container = {} norm_filename_to_client_container = {} initial_paths = list(paths) paths_from_eclipse_to_python = initial_paths[:] # Apply normcase to the existing paths to follow the os preferences. for i, (path0, path1) in enumerate(paths_from_eclipse_to_python[:]): if IS_PY2: if isinstance(path0, unicode): path0 = path0.encode(sys.getfilesystemencoding()) if isinstance(path1, unicode): path1 = path1.encode(sys.getfilesystemencoding()) path0 = _fix_path(path0, eclipse_sep) path1 = _fix_path(path1, python_sep) initial_paths[i] = (path0, path1) paths_from_eclipse_to_python[i] = (normcase(path0), normcase(path1)) if not paths_from_eclipse_to_python: # no translation step needed (just inline the calls) norm_file_to_client = _original_file_to_client norm_file_to_server = _original_file_to_server return # only setup translation functions if absolutely needed! norm_file_to_server = _norm_file_to_server norm_file_to_client = _norm_file_to_client setup_client_server_paths(PATHS_FROM_ECLIPSE_TO_PYTHON) # For given file f returns tuple of its absolute path, real path and base name
38.169179
167
0.642647
0a19d381c903a0542a3789f5f4dbe06b87e43247
5,481
py
Python
src/networking/SessionsManager.py
OfekHarel/Orion-Connection-Software
2e767e31f94574bf464e24eaeed87f36b3247ca6
[ "MIT" ]
1
2021-05-18T10:16:05.000Z
2021-05-18T10:16:05.000Z
src/networking/SessionsManager.py
OfekHarel/Orion-Connection-Software
2e767e31f94574bf464e24eaeed87f36b3247ca6
[ "MIT" ]
null
null
null
src/networking/SessionsManager.py
OfekHarel/Orion-Connection-Software
2e767e31f94574bf464e24eaeed87f36b3247ca6
[ "MIT" ]
null
null
null
import os import socket from random import randint from src import Constants from src.Constants import Network from src.networking import NetworkPackets, Actions from src.networking.Client import Client from src.utils.DH_Encryption import Encryption from src.utils.Enum import Enum
35.590909
106
0.550447
0a1a359a4636f368d0f28057e4bf1af274c7fb79
3,332
py
Python
influxdb_service_sdk/model/container/resource_requirements_pb2.py
easyopsapis/easyops-api-python
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
[ "Apache-2.0" ]
5
2019-07-31T04:11:05.000Z
2021-01-07T03:23:20.000Z
influxdb_service_sdk/model/container/resource_requirements_pb2.py
easyopsapis/easyops-api-python
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
[ "Apache-2.0" ]
null
null
null
influxdb_service_sdk/model/container/resource_requirements_pb2.py
easyopsapis/easyops-api-python
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: resource_requirements.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from influxdb_service_sdk.model.container import resource_list_pb2 as influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='resource_requirements.proto', package='container', syntax='proto3', serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'), serialized_pb=_b('\n\x1bresource_requirements.proto\x12\tcontainer\x1a\x38influxdb_service_sdk/model/container/resource_list.proto\"j\n\x14ResourceRequirements\x12\'\n\x06limits\x18\x01 \x01(\x0b\x32\x17.container.ResourceList\x12)\n\x08requests\x18\x02 \x01(\x0b\x32\x17.container.ResourceListBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3') , dependencies=[influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2.DESCRIPTOR,]) _RESOURCEREQUIREMENTS = _descriptor.Descriptor( name='ResourceRequirements', full_name='container.ResourceRequirements', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='limits', full_name='container.ResourceRequirements.limits', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='requests', full_name='container.ResourceRequirements.requests', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=100, serialized_end=206, ) _RESOURCEREQUIREMENTS.fields_by_name['limits'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST _RESOURCEREQUIREMENTS.fields_by_name['requests'].message_type = influxdb__service__sdk_dot_model_dot_container_dot_resource__list__pb2._RESOURCELIST DESCRIPTOR.message_types_by_name['ResourceRequirements'] = _RESOURCEREQUIREMENTS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ResourceRequirements = _reflection.GeneratedProtocolMessageType('ResourceRequirements', (_message.Message,), { 'DESCRIPTOR' : _RESOURCEREQUIREMENTS, '__module__' : 'resource_requirements_pb2' # @@protoc_insertion_point(class_scope:container.ResourceRequirements) }) _sym_db.RegisterMessage(ResourceRequirements) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
40.144578
380
0.801921
0a1b13a3f3b068eb65d58c46e8bda2b6889a1fef
12,738
py
Python
tests/test_http_client.py
bhch/async-stripe
75d934a8bb242f664e7be30812c12335cf885287
[ "MIT", "BSD-3-Clause" ]
8
2021-05-29T08:57:58.000Z
2022-02-19T07:09:25.000Z
tests/test_http_client.py
bhch/async-stripe
75d934a8bb242f664e7be30812c12335cf885287
[ "MIT", "BSD-3-Clause" ]
5
2021-05-31T10:18:36.000Z
2022-01-25T11:39:03.000Z
tests/test_http_client.py
bhch/async-stripe
75d934a8bb242f664e7be30812c12335cf885287
[ "MIT", "BSD-3-Clause" ]
1
2021-05-29T13:27:10.000Z
2021-05-29T13:27:10.000Z
from __future__ import absolute_import, division, print_function import pytest import json import asyncio import stripe import urllib3 from stripe import six, util from async_stripe.http_client import TornadoAsyncHTTPClient pytestmark = pytest.mark.asyncio VALID_API_METHODS = ("get", "post", "delete") def test_request(self, request_mock, mock_response, check_call): mock_response(request_mock, '{"foo": "baz"}', 200) for method in VALID_API_METHODS: abs_url = self.valid_url data = "" if method != "post": abs_url = "%s?%s" % (abs_url, data) data = None headers = {"my-header": "header val"} body, code, _ = self.make_request(method, abs_url, headers, data) assert code == 200 assert body == '{"foo": "baz"}' check_call(request_mock, method, abs_url, data, headers) def test_request_stream( self, mocker, request_mock, mock_response, check_call ): for method in VALID_API_METHODS: mock_response(request_mock, "some streamed content", 200) abs_url = self.valid_url data = "" if method != "post": abs_url = "%s?%s" % (abs_url, data) data = None headers = {"my-header": "header val"} print(dir(self)) print("make_request_stream" in dir(self)) stream, code, _ = self.make_request_stream( method, abs_url, headers, data ) assert code == 200 # Here we need to convert and align all content on one type (string) # as some clients return a string stream others a byte stream. body_content = stream.read() if hasattr(body_content, "decode"): body_content = body_content.decode("utf-8") assert body_content == "some streamed content" mocker.resetall() class TestTornadoAsyncHTTPClient: # :TODO: Write tests for tornado client pass
33.968
87
0.637777
0a1ba6256767aa29fb3040084aca24a7cb8fa6a0
1,685
py
Python
http/static/jsonvis.py
cheeseywhiz/cheeseywhiz
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
[ "MIT" ]
null
null
null
http/static/jsonvis.py
cheeseywhiz/cheeseywhiz
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
[ "MIT" ]
null
null
null
http/static/jsonvis.py
cheeseywhiz/cheeseywhiz
51f6651ddbaeebd14d9ce77776bc4cf3a95511c4
[ "MIT" ]
null
null
null
"""\ Provides html file visualization of a json dataset """ import json import subprocess
28.083333
77
0.570326
0a1ba6be1f357556fe2a856981f28ab99cb28a6a
1,104
py
Python
sim2d_game_analyzer/MainWindow.py
goncamateus/sim2d_game_analyzer
3e264df75896b8856163478535fdeeeef2d66b2f
[ "MIT" ]
1
2020-06-16T05:53:24.000Z
2020-06-16T05:53:24.000Z
sim2d_game_analyzer/MainWindow.py
goncamateus/sim2d_game_analyzer
3e264df75896b8856163478535fdeeeef2d66b2f
[ "MIT" ]
null
null
null
sim2d_game_analyzer/MainWindow.py
goncamateus/sim2d_game_analyzer
3e264df75896b8856163478535fdeeeef2d66b2f
[ "MIT" ]
null
null
null
import sys from PyQt5 import QtGui from PyQt5.QtCore import QEvent, QPoint, Qt from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import (QApplication, QDialog, QGroupBox, QMainWindow, QTabWidget, QVBoxLayout, QWidget) from sim2d_game_analyzer.fmdb_tab import FMDBTab if __name__ == "__main__": app = QApplication(sys.argv) mainwindow = MainWindow() sys.exit(app.exec())
27.6
75
0.663043
0a1bf05862b9f835d8a239dbc4e6161e02b46036
12,543
py
Python
cmd/extractor.py
Grammarian/sicle
94d826477d269c4c3534d83fa2e940de1d923140
[ "Apache-2.0" ]
null
null
null
cmd/extractor.py
Grammarian/sicle
94d826477d269c4c3534d83fa2e940de1d923140
[ "Apache-2.0" ]
null
null
null
cmd/extractor.py
Grammarian/sicle
94d826477d269c4c3534d83fa2e940de1d923140
[ "Apache-2.0" ]
null
null
null
# pip install openpyxl # pip install cuid import os.path import json import datetime from openpyxl import load_workbook import cuid # https://github.com/necaris/cuid.py - create uuid's in the format that graphcool expects SOURCE_XLSX = "./data/CLP_combined.xlsx" EXTRACT_OUTPUT_DIR = "../server/extract" SCHOOL_TITLES = ["ORGANISATION_ID", "ORGANISATION_NAME", "ORG_ELECTORATE", "P_ADDRESS1", "P_SUBURB", "P_STATE", "P_POSTCODE", "S_ADDRESS1", "S_SUBURB", "S_STATE", "S_POSTCODE", "SCHOOL_NAME", "SCH_ELECTORATE", "SCHOOL_ID", "SCHOOL_P_ADDRESS1", "SCHOOL_P_SUBURB", "SCHOOL_P_STATE", "SCHOOL_P_POSTCODE", "SCHOOL_S_ADDRESS1", "SCHOOL_S_SUBURB", "SCHOOL_S_STATE", "SCHOOL_S_POSTCODE", "LOCATION_NAME", "LOC_ELECTORATE", "LOC_S_ADDRESS1", "LOC_S_SUBURB", "LOC_S_STATE", "LOC_S_POSTCODE"] ORGANISATION_FIELDS = {"ORGANISATION_ID": "CLP_ORGANISATION_ID", "ORGANISATION_NAME": "NAME", "ORG_ELECTORATE": "ELECTORATE", "S_ADDRESS1": "ADDRESS", "S_SUBURB": "SUBURB", "S_STATE": "STATE", "S_POSTCODE": "POSTCODE", } SCHOOL_FIELDS = {"SCHOOL_NAME": "NAME", "SCH_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID", "ORGANISATION_ID": "CLP_ORGANISATION_ID", "SCHOOL_S_ADDRESS1": "ADDRESS", "SCHOOL_S_SUBURB": "SUBURB", "SCHOOL_S_STATE": "STATE", "SCHOOL_S_POSTCODE": "POSTCODE", } LOCATION_FIELDS = {"LOCATION_NAME": "NAME", "LOC_ELECTORATE": "ELECTORATE", "SCHOOL_ID": "CLP_SCHOOL_ID", "LOC_S_ADDRESS1": "ADDRESS", "LOC_S_SUBURB": "SUBURB", "LOC_S_STATE": "STATE", "LOC_S_POSTCODE": "POSTCODE"} TEACHER_TITLES = ["TEACHER_ID", "ORGANISATION_NAME", "SCHOOL_NAME", "TEACHER_NAME", "TITLE", "LNAME", "FNAME", "TEACHER_LANGUAGES", "P_ADDRESS1", "P_ADDRESS2", "P_SUBURB", "P_STATE", "P_POSTCODE", "TELEPHONE", "TEL_EVENING", "EMAIL", "MOBILE", "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION", "FIELD_OF_EDUCATION", "DEGREE_COUNTRY", "DEGREE_YEAR", "ORGANISATION_ID", "SCHOOL_ID"] STUDENT_TITLES = ["SCHOOL_NAME", "SCHOOL_ID", "STUDENT_ID", "STUDENT_SRN", "LOCATION_NAME", "STUDENT_LNAME", "STUDENT_FNAME", "DOB", "TEL", "LOCATION_NAME_1"] TEACHER_FIELDS = {"TEACHER_ID": "CLP_TEACHER_ID", "ORGANISATION_NAME": "ORGANISATION_NAME", "SCHOOL_NAME": "SCHOOL_NAME", "TITLE": "TITLE", "LNAME": "FAMILY_NAME", "FNAME": "GIVEN_NAMES", "TEACHER_LANGUAGES": "LANGUAGES", "P_ADDRESS1": "ADDRESS1", "P_ADDRESS2": "ADDRESS2", "P_SUBURB": "SUBURB", "P_STATE": "STATE", "P_POSTCODE": "POSTCODE", "TELEPHONE": "DAY_PHONE", "TEL_EVENING": "EVENING_PHONE", "EMAIL": "EMAIL", "MOBILE": "MOBILE", "LEVEL_TAUGHT": "LEVEL_TAUGHT", "LEVEL_OF_EDUCATION": "EDUCATION_LEVEL", "FIELD_OF_EDUCATION": "EDUCATION_FIELD", "DEGREE_COUNTRY": "EDUCATION_COUNTRY", "DEGREE_YEAR": "EDUCATION_YEAR", "ORGANISATION_ID": "ORGANISATION_ID", "SCHOOL_ID": "SCHOOL_ID", } STUDENT_FIELDS = {"SCHOOL_NAME": "SCHOOL_NAME", "SCHOOL_ID": "SCHOOL_ID", "STUDENT_ID": "CLP_STUDENT_ID", "STUDENT_SRN": "SRN", "LOCATION_NAME": "LOCATION", "STUDENT_LNAME": "FAMILY_NAME", "STUDENT_FNAME": "GIVEN_NAMES", "DOB": "DATE_OF_BIRTH", "TEL": "PHONE", "LOCATION_NAME_1": "DAY_SCHOOL", } def to_camel(s): """Convert an underscored title into camel case. 
'PARENT_ORGANISATION_ID' => 'parentOrganisationId'""" bits = [(x.lower() if i == 0 else x.title()) for (i, x) in enumerate(s.split("_"))] return "".join(bits) def inject_required(type_name, dicts): "Inject the required fields that graphcool import required" for x in dicts: x["_typeName"] = type_name x["id"] = cuid.cuid() x["createdAt"] = x["updatedAt"] = now_as_iso8601() return list(dicts) def convert_dob_to_datetime(s): "Convert the string from 99/MON/YY to a ISO date" dt = datetime.datetime.strptime(s, "%d/%b/%y") return dt.isoformat() + ".0Z" # GraphCool import insists on microseconds, hence the ".0" def copy_without(dicts, *keys_to_remove): "Return iterable that contains copies of the given dictionary with all the given keys removed" copies = [x.copy() for x in dicts] for d in copies: for to_remove in keys_to_remove: d.pop(to_remove, None) return copies if __name__ == "__main__": main()
42.090604
118
0.64817
0a1c12c2f6792d992cfb44ac67b60bca865f920c
6,148
py
Python
fHDHR/origin/origin_channels.py
crackers8199/fHDHR_USTVGO
50e284fe004c8b60b07dbe29fa3fb4f69a7b3cfa
[ "WTFPL" ]
null
null
null
fHDHR/origin/origin_channels.py
crackers8199/fHDHR_USTVGO
50e284fe004c8b60b07dbe29fa3fb4f69a7b3cfa
[ "WTFPL" ]
null
null
null
fHDHR/origin/origin_channels.py
crackers8199/fHDHR_USTVGO
50e284fe004c8b60b07dbe29fa3fb4f69a7b3cfa
[ "WTFPL" ]
null
null
null
import os import sys from lxml import html import pathlib import json import m3u8 from seleniumwire import webdriver from selenium.common.exceptions import TimeoutException, NoSuchElementException from selenium.webdriver.firefox.options import Options as FirefoxOptions IFRAME_CSS_SELECTOR = '.iframe-container>iframe' # Disable # Restore
33.78022
134
0.609141
0a1c4786888ba534eda7784354ef48e759ceac1e
40
py
Python
version.py
XioNoX/ansible-junos-stdlib-old
92f33b3bbe6d2cc36d9f2028bb7c792f25ddce80
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
version.py
XioNoX/ansible-junos-stdlib-old
92f33b3bbe6d2cc36d9f2028bb7c792f25ddce80
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
version.py
XioNoX/ansible-junos-stdlib-old
92f33b3bbe6d2cc36d9f2028bb7c792f25ddce80
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
VERSION = "1.4.0" DATE = "2016-Sept-21"
13.333333
21
0.6
0a1cc533cda21da8b86ba8309652b8179ef12637
1,371
py
Python
Episode11-Menu/Pygame/explosion.py
Inksaver/Shmup_With_Pygame_Love2D_Monogame
84838516d9dd9d6639b1b699dca546bfdfec73dc
[ "CC0-1.0" ]
1
2022-02-01T04:05:04.000Z
2022-02-01T04:05:04.000Z
Episode11-Menu/Pygame/explosion.py
Inksaver/Shmup_With_Pygame_Love2D_Monogame
84838516d9dd9d6639b1b699dca546bfdfec73dc
[ "CC0-1.0" ]
null
null
null
Episode11-Menu/Pygame/explosion.py
Inksaver/Shmup_With_Pygame_Love2D_Monogame
84838516d9dd9d6639b1b699dca546bfdfec73dc
[ "CC0-1.0" ]
null
null
null
import pygame import shared
41.545455
84
0.644055
0a1e22f8f6e931aec05c9d718e0438f67bfcceaf
6,950
py
Python
funcx_endpoint/funcx_endpoint/strategies/base.py
arokem/funcX
bd45b93f6c5a1676735b6f8246312d6b468a4b20
[ "Apache-1.1" ]
1
2021-01-18T21:36:22.000Z
2021-01-18T21:36:22.000Z
funcx_endpoint/funcx_endpoint/strategies/base.py
Loonride/funcX
95ae788eac14397a5ec042f0a2ad05c14030b807
[ "Apache-1.1" ]
null
null
null
funcx_endpoint/funcx_endpoint/strategies/base.py
Loonride/funcX
95ae788eac14397a5ec042f0a2ad05c14030b807
[ "Apache-1.1" ]
null
null
null
import sys import threading import logging import time logger = logging.getLogger("interchange.strategy.base")
32.325581
93
0.608633
0a1e3877d30a492ceb0b5445e7d1d835bd228d55
7,409
py
Python
hw3 cnn and vis/gradcam.py
mtang1001/ML-Exploration
6fec422eca127210e948945e6d15526947bfae8e
[ "Apache-2.0" ]
null
null
null
hw3 cnn and vis/gradcam.py
mtang1001/ML-Exploration
6fec422eca127210e948945e6d15526947bfae8e
[ "Apache-2.0" ]
null
null
null
hw3 cnn and vis/gradcam.py
mtang1001/ML-Exploration
6fec422eca127210e948945e6d15526947bfae8e
[ "Apache-2.0" ]
null
null
null
import torch import torchvision import matplotlib import matplotlib.pyplot as plt from PIL import Image from captum.attr import GuidedGradCam, GuidedBackprop from captum.attr import LayerActivation, LayerConductance, LayerGradCam from data_utils import * from image_utils import * from captum_utils import * import numpy as np from visualizers import GradCam plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' X, y, class_names = load_imagenet_val(num=5) # FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this. gc_model = torchvision.models.squeezenet1_1(pretrained=True) gc = GradCam() X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) # Guided Back-Propagation gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gbp_result.shape[0]): plt.subplot(1, 5, i + 1) img = gbp_result[i] img = rescale(img) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_backprop.png') # GradCam # GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below gc_model = torchvision.models.squeezenet1_1(pretrained=True) for param in gc_model.parameters(): param.requires_grad = True X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gradcam_val = gradcam_result[i] img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255) img = img / np.max(img) plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/gradcam.png') # As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam. X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True) y_tensor = torch.LongTensor(y) gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model) gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model) plt.figure(figsize=(24, 24)) for i in range(gradcam_result.shape[0]): gbp_val = gbp_result[i] gradcam_val = np.expand_dims(gradcam_result[i], axis=2) # Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines) img = gradcam_val * gbp_val img = np.expand_dims(img.transpose(2, 0, 1), axis=0) img = np.float32(img) img = torch.from_numpy(img) img = deprocess(img) plt.subplot(1, 5, i + 1) plt.imshow(img) plt.title(class_names[y[i]]) plt.axis('off') plt.gcf().tight_layout() plt.savefig('visualization/guided_gradcam.png') # **************************************************************************************** # # Captum model = torchvision.models.squeezenet1_1(pretrained=True) # We don't want to train the model, so tell PyTorch not to compute gradients # with respect to model parameters. 
for param in model.parameters(): param.requires_grad = False # Convert X and y from numpy arrays to Torch Tensors X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0) y_tensor = torch.LongTensor(y) conv_module = model.features[12] ############################################################################## # TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. # # visualize_attr_maps function from captum_utils.py is useful for # # visualizing captum outputs # # Use conv_module as the convolution layer for gradcam # ############################################################################## # Computing Guided GradCam ggc = GuidedGradCam(model, conv_module) attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor) # print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape) visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam']) # Computing Guided BackProp gbp = GuidedBackprop(model) attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor) visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam']) ############################################################################## # END OF YOUR CODE # ############################################################################## # Try out different layers and see observe how the attributions change layer = model.features[3] # Example visualization for using layer visualizations # layer_act = LayerActivation(model, layer) # layer_act_attr = compute_attributions(layer_act, X_tensor) # layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True) ############################################################################## # TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar # # to what we did for the other captum sections, using our helper methods), # # but with some preprocessing calculations. # # # # You can refer to the LayerActivation example above and you should be # # using 'layer' given above for this section # # # # Also note that, you would need to customize your 'attr_preprocess' # # parameter that you send along to 'visualize_attr_maps' as the default # # 'attr_preprocess' is written to only to handle multi channel attributions. # # # # For layer gradcam look at the usage of the parameter relu_attributions # ############################################################################## # Layer gradcam aggregates across all channels from captum.attr import LayerAttribution N, C, H, W = X_tensor.shape LC = LayerConductance(model, layer) LC_attr = compute_attributions(LC, X_tensor, target = y_tensor) LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True) LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) ) LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance']) LGC = LayerGradCam(model, layer) LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor) LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True) LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W)) LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1) visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam']) ############################################################################## # END OF YOUR CODE # ##############################################################################
41.623596
137
0.626535
0a1e494933ae306f17bb20205df33acd66dcd6cb
3,713
py
Python
src/genotypes.py
k8lion/admmdarts
4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776
[ "Apache-2.0" ]
null
null
null
src/genotypes.py
k8lion/admmdarts
4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776
[ "Apache-2.0" ]
null
null
null
src/genotypes.py
k8lion/admmdarts
4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776
[ "Apache-2.0" ]
null
null
null
from collections import namedtuple Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') PRIMITIVES = [ 'none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] CRBPRIMITIVES = [ 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] NASNet = Genotype( normal=[ ('sep_conv_5x5', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 0), ('sep_conv_3x3', 0), ('avg_pool_3x3', 1), ('skip_connect', 0), ('avg_pool_3x3', 0), ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 1), ], normal_concat=[2, 3, 4, 5, 6], reduce=[ ('sep_conv_5x5', 1), ('sep_conv_7x7', 0), ('max_pool_3x3', 1), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('sep_conv_5x5', 0), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 2), ('max_pool_3x3', 1), ], reduce_concat=[4, 5, 6], ) AmoebaNet = Genotype( normal=[ ('avg_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_5x5', 2), ('sep_conv_3x3', 0), ('avg_pool_3x3', 3), ('sep_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('avg_pool_3x3', 1), ], normal_concat=[4, 5, 6], reduce=[ ('avg_pool_3x3', 0), ('sep_conv_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_7x7', 2), ('sep_conv_7x7', 0), ('avg_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('conv_7x1_1x7', 0), ('sep_conv_3x3', 5), ], reduce_concat=[3, 4, 6] ) DARTS_V1 = Genotype( normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) DARTS_V2 = Genotype( normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) DARTS = DARTS_V2 BATH = Genotype( normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0), ('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3), ('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6)) BATH2 = Genotype( normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6), reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1), ('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)], reduce_concat=range(2, 6))
34.700935
116
0.546458
0a1eaf6b7e32695b5e6a96b0eee80707d820de35
9,462
py
Python
colab_logica.py
smdesai/logica
ad099bcd6064e38e9c2bc9a99564832857c0768c
[ "Apache-2.0" ]
null
null
null
colab_logica.py
smdesai/logica
ad099bcd6064e38e9c2bc9a99564832857c0768c
[ "Apache-2.0" ]
null
null
null
colab_logica.py
smdesai/logica
ad099bcd6064e38e9c2bc9a99564832857c0768c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library for using Logica in CoLab.""" from .common import color from .common import concertina_lib from .compiler import functors from .compiler import rule_translate from .compiler import universe import IPython from IPython.core.magic import register_cell_magic from IPython.display import display import os import pandas from .parser_py import parse from .common import sqlite3_logica BQ_READY = True # By default. try: from google.cloud import bigquery except: BQ_READY = False print('Could not import google.cloud.bigquery.') try: from google.colab import auth except: BQ_READY = False print('Could not import google.cloud.auth.') try: from google.colab import widgets WIDGETS_IMPORTED = True except: WIDGETS_IMPORTED = False print('Could not import google.colab.widgets.') PROJECT = None # TODO: Should this be renamed to PSQL_ENGINE, PSQL_CONNECTION? DB_ENGINE = None DB_CONNECTION = None USER_AUTHENTICATED = False TABULATED_OUTPUT = True SHOW_FULL_QUERY = True PREAMBLE = None if not WIDGETS_IMPORTED: SetTabulatedOutput(False) def TabBar(*args): """Returns a real TabBar or a mock. Useful for UIs that don't support JS.""" if TABULATED_OUTPUT: return widgets.TabBar(*args) return MockTabBar() def ShowError(error_text): print(color.Format('[ {error}Error{end} ] ' + error_text)) def Logica(line, cell, run_query): """Running Logica predicates and storing results.""" predicates = ParseList(line) if not predicates: ShowError('No predicates to run.') return try: program = ';\n'.join(s for s in [PREAMBLE, cell] if s) parsed_rules = parse.ParseFile(program)['rule'] except parse.ParsingException as e: e.ShowMessage() return try: program = universe.LogicaProgram(parsed_rules) except functors.FunctorError as e: e.ShowMessage() return engine = program.annotations.Engine() if engine == 'bigquery' and not BQ_READY: ShowError( 'BigQuery client and/or authentification is not installed. \n' 'It is the easiest to run BigQuery requests from Google CoLab:\n' ' https://colab.research.google.com/.\n' 'Note that running Logica on SQLite requires no installation.\n' 'This could be a good fit for working with small data or learning Logica.\n' 'Use {warning}@Engine("sqlite");{end} annotation in your program to use SQLite.') return bar = TabBar(predicates + ['(Log)']) logs_idx = len(predicates) executions = [] sub_bars = [] ip = IPython.get_ipython() for idx, predicate in enumerate(predicates): with bar.output_to(logs_idx): try: sql = program.FormattedPredicateSql(predicate) executions.append(program.execution) ip.push({predicate + '_sql': sql}) except rule_translate.RuleCompileException as e: print('Encountered error when compiling %s.' % predicate) e.ShowMessage() return # Publish output to Colab cell. with bar.output_to(idx): sub_bar = TabBar(['SQL', 'Result']) sub_bars.append(sub_bar) with sub_bar.output_to(0): if SHOW_FULL_QUERY: print( color.Format( 'The following query is stored at {warning}%s{end} ' 'variable.' 
% ( predicate + '_sql'))) print(sql) else: print('Query is stored at %s variable.' % color.Warn(predicate + '_sql')) with bar.output_to(logs_idx): if engine == 'sqlite': sql_runner = SqliteRunner() elif engine == 'psql': sql_runner = PostgresRunner() elif engine == 'bigquery': EnsureAuthenticatedUser() sql_runner = RunSQL else: raise Exception('Logica only supports BigQuery, PostgreSQL and SQLite ' 'for now.') result_map = concertina_lib.ExecuteLogicaProgram( executions, sql_runner=sql_runner, sql_engine=engine) for idx, predicate in enumerate(predicates): t = result_map[predicate] ip.push({predicate: t}) with bar.output_to(idx): with sub_bars[idx].output_to(1): if run_query: print( color.Format( 'The following table is stored at {warning}%s{end} ' 'variable.' % predicate)) display(t) else: print('The query was not run.') print(' ') # To activate the tabbar.
28.672727
92
0.686007
0a1ed95ecf3a94b0314f7b8f523edacf4c486e8a
275
py
Python
pyccel/ast/basic.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
pyccel/ast/basic.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
pyccel/ast/basic.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
from sympy.core.basic import Basic as sp_Basic
18.333333
46
0.6
0a20c183c03d4133fca24e84a8755331075102c6
1,195
py
Python
alibi_detect/utils/tests/test_discretize.py
Clusks/alibi-detect
b39406a6cf88f315f401562d4fea93a42aa6dcc1
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
1,227
2019-11-19T15:38:40.000Z
2022-03-31T11:18:32.000Z
alibi_detect/utils/tests/test_discretize.py
Clusks/alibi-detect
b39406a6cf88f315f401562d4fea93a42aa6dcc1
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
323
2019-11-21T18:41:00.000Z
2022-03-31T21:08:56.000Z
alibi_detect/utils/tests/test_discretize.py
Clusks/alibi-detect
b39406a6cf88f315f401562d4fea93a42aa6dcc1
[ "ECL-2.0", "Apache-2.0", "CC0-1.0" ]
133
2019-11-19T14:23:23.000Z
2022-03-31T07:55:43.000Z
from itertools import product import numpy as np import pytest from alibi_detect.utils.discretizer import Discretizer x = np.random.rand(10, 4) n_features = x.shape[1] feature_names = [str(_) for _ in range(n_features)] categorical_features = [[], [1, 3]] percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))] tests = list(product(categorical_features, percentiles)) n_tests = len(tests)
31.447368
85
0.663598
0a21ba878c2e6396a56688811ff51897970088c4
3,361
py
Python
tinc/tests/parameter_space_test.py
AlloSphere-Research-Group/tinc-python
4c3390df9911a391833244de1eb1d33a2e19d330
[ "BSD-3-Clause" ]
1
2020-11-23T22:42:50.000Z
2020-11-23T22:42:50.000Z
tinc/tests/parameter_space_test.py
AlloSphere-Research-Group/tinc-python
4c3390df9911a391833244de1eb1d33a2e19d330
[ "BSD-3-Clause" ]
null
null
null
tinc/tests/parameter_space_test.py
AlloSphere-Research-Group/tinc-python
4c3390df9911a391833244de1eb1d33a2e19d330
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Mon Jun 14 11:49:43 2021 @author: Andres """ import sys,time import unittest from tinc import * if __name__ == '__main__': unittest.main()
28.974138
86
0.555489