Dataset schema (column, type, observed range):

    hexsha                                      string, length 40
    size                                        int64, 5 to 2.06M
    ext                                         string, 11 classes
    lang                                        string, 1 value
    max_stars_repo_path                         string, length 3 to 251
    max_stars_repo_name                         string, length 4 to 130
    max_stars_repo_head_hexsha                  string, length 40 to 78
    max_stars_repo_licenses                     list, length 1 to 10
    max_stars_count                             int64, 1 to 191k
    max_stars_repo_stars_event_min_datetime     string, length 24
    max_stars_repo_stars_event_max_datetime     string, length 24
    max_issues_repo_path                        string, length 3 to 251
    max_issues_repo_name                        string, length 4 to 130
    max_issues_repo_head_hexsha                 string, length 40 to 78
    max_issues_repo_licenses                    list, length 1 to 10
    max_issues_count                            int64, 1 to 116k
    max_issues_repo_issues_event_min_datetime   string, length 24
    max_issues_repo_issues_event_max_datetime   string, length 24
    max_forks_repo_path                         string, length 3 to 251
    max_forks_repo_name                         string, length 4 to 130
    max_forks_repo_head_hexsha                  string, length 40 to 78
    max_forks_repo_licenses                     list, length 1 to 10
    max_forks_count                             int64, 1 to 105k
    max_forks_repo_forks_event_min_datetime     string, length 24
    max_forks_repo_forks_event_max_datetime     string, length 24
    content                                     string, length 1 to 1.05M
    avg_line_length                             float64, 1 to 1.02M
    max_line_length                             int64, 3 to 1.04M
    alphanum_fraction                           float64, 0 to 1
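Each record below lists its fields in the schema order above. For orientation, a minimal sketch of loading and inspecting rows with this schema (assuming the dump has been exported as JSON Lines; the file name is a placeholder, not the actual source of this dump):

    from datasets import load_dataset

    # Load a local JSON-Lines export of the rows below (path is hypothetical).
    ds = load_dataset("json", data_files="python_rows.jsonl", split="train")

    row = ds[0]
    print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
    print(row["content"][:200])  # first 200 characters of the source file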
13422bb3478f929cfdd7d39790b4b35df0ba961e
7,034
py
Python
src/infer/_ExtractSimpleDeformTTA.py
RamsteinWR/PneumoniaRSNA1
08bdba51292307a78ef711c6be4a63faea240ddf
[ "MIT" ]
null
null
null
src/infer/_ExtractSimpleDeformTTA.py
RamsteinWR/PneumoniaRSNA1
08bdba51292307a78ef711c6be4a63faea240ddf
[ "MIT" ]
null
null
null
src/infer/_ExtractSimpleDeformTTA.py
RamsteinWR/PneumoniaRSNA1
08bdba51292307a78ef711c6be4a63faea240ddf
[ "MIT" ]
null
null
null
import json
import os
import re

import numpy as np
import pandas as pd

from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR

WDIR = os.path.dirname(os.path.abspath(__file__))


def flip_box(box):
    """
    box (list, length 4): [x1, y1, w, h]
    """
    # Get top right corner of prediction
    x1 = box[0]
    y1 = box[1]
    w = box[2]
    h = box[3]
    topRight = (x1 + w, y1)
    # Top left corner of flipped box is:
    newTopLeft = (1024. - topRight[0], topRight[1])
    return [newTopLeft[0], newTopLeft[1], w, h]


with open(MAPPINGS_PATH) as f:
    mapping = json.load(f)

with open(MAPPINGS_PATH.replace(test_image_set, "{}_flip".format(test_image_set))) as f:
    flip_mapping = json.load(f)

metadata = pd.read_csv(METADATA_PATH)

execfile(os.path.join(WDIR, "DetectionEnsemble.py"))

imsizes = [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]

fold0_nom = "fold{}_{}".format(0, imsizes[0])
fold1_nom = "fold{}_{}".format(1, imsizes[1])
fold2_nom = "fold{}_{}".format(2, imsizes[2])
fold3_nom = "fold{}_{}".format(3, imsizes[3])
fold4_nom = "fold{}_{}".format(4, imsizes[4])
fold5_nom = "fold{}_{}".format(5, imsizes[5])
fold6_nom = "fold{}_{}".format(6, imsizes[6])
fold7_nom = "fold{}_{}".format(7, imsizes[7])
fold8_nom = "fold{}_{}".format(8, imsizes[8])
fold9_nom = "fold{}_{}".format(9, imsizes[9])

fold1RCNN0 = run_ensemble(get_TTA_results("fold1_256", test_image_set, RCNN0_DETS_DIR.format(fold1_nom)), metadata)
fold3RCNN0 = run_ensemble(get_TTA_results("fold3_320", test_image_set, RCNN0_DETS_DIR.format(fold3_nom)), metadata)
fold5RCNN0 = run_ensemble(get_TTA_results("fold5_384", test_image_set, RCNN0_DETS_DIR.format(fold5_nom)), metadata)
fold7RCNN0 = run_ensemble(get_TTA_results("fold7_448", test_image_set, RCNN0_DETS_DIR.format(fold7_nom)), metadata)
fold9RCNN0 = run_ensemble(get_TTA_results("fold9_512", test_image_set, RCNN0_DETS_DIR.format(fold9_nom)), metadata)

list_of_dfs = [fold1RCNN0, fold3RCNN0, fold5RCNN0, fold7RCNN0, fold9RCNN0]

final_TTA_ensemble = run_ensemble(list_of_dfs, metadata, adjust_score=False)
final_TTA_ensemble["adjustedScore"] = final_TTA_ensemble.score * final_TTA_ensemble.votes
final_TTA_ensemble = final_TTA_ensemble[["patientId", "x", "y", "w", "h", "score", "votes", "adjustedScore"]]
final_TTA_ensemble.to_csv(os.path.join(WDIR, "../../SimpleDCNPredictions.csv"), index=False)
44.238994
115
0.598806
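A portability note on the record above: it calls execfile(), a builtin that exists only in Python 2, while the rest of the file is Python 3-style. A sketch of a Python 3 equivalent (illustration only, not part of the recorded file):

    def execfile_py3(path, namespace):
        # Compile and execute a script in the given namespace, like Python 2's execfile().
        with open(path) as fh:
            exec(compile(fh.read(), path, "exec"), namespace)

    # e.g. execfile_py3(os.path.join(WDIR, "DetectionEnsemble.py"), globals())

The record also depends on run_ensemble and get_TTA_results, which the executed DetectionEnsemble.py is presumably expected to define.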
1342626b945cae1f2c60f3de7811ba70848e89f4
1,896
py
Python
pool4.py
yfii/yfiiapi
2c0341b66108f99005dc5a40e3d1d30267f50bb5
[ "MIT" ]
4
2020-09-11T12:31:37.000Z
2020-12-14T04:42:05.000Z
pool4.py
yfii/yfiiapi
2c0341b66108f99005dc5a40e3d1d30267f50bb5
[ "MIT" ]
1
2020-10-07T11:03:07.000Z
2020-10-07T11:03:07.000Z
pool4.py
yfii/yfiiapi
2c0341b66108f99005dc5a40e3d1d30267f50bb5
[ "MIT" ]
9
2020-09-25T17:54:50.000Z
2021-06-05T05:36:14.000Z
from web3 import Web3, HTTPProvider
import json

w3url = "https://mainnet.infura.io/v3/998f64f3627548bbaf2630599c1eefca"
w3 = Web3(HTTPProvider(w3url))

WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
YFII = "0xa1d0E215a23d7030842FC67cE582a6aFa3CCaB83"
DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
iUSDT = "0x72Cf258c852Dc485a853370171d46B9D29fD3184"
POOL4 = "0x3d367C9529f260B0661e1C1E91167C9319ee96cA"

yfii2dai = [YFII, WETH, DAI]

with open("abi/erc20.json") as f:
    erc20ABI = json.loads(f.read())
with open("abi/uniswapRouterv2.json") as f:
    uniswapABI = json.loads(f.read())
with open("abi/pool4.json") as f:
    pool4ABI = json.loads(f.read())

uniswap_instance = w3.eth.contract(
    abi=uniswapABI,
    address=w3.toChecksumAddress("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
)

pool4_instance = w3.eth.contract(abi=pool4ABI, address=POOL4)

if __name__ == "__main__":
    print(getDATA())
29.169231
86
0.730485
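The record above ends by calling getDATA(), whose definition is not included in this dump. A hypothetical sketch consistent with the imports (quoting 1 YFII in DAI through the Uniswap V2 router; the real function's shape is unknown):

    def getDATA():
        # Quote 1 YFII (18 decimals) along the YFII -> WETH -> DAI path.
        amounts = uniswap_instance.functions.getAmountsOut(10 ** 18, yfii2dai).call()
        return {"yfii_price_dai": amounts[-1] / 1e18}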
13427c6b2da4367eae23b3d65430a2f826b58232
106,827
py
Python
obswebsocket/requests.py
PanBartosz/obs-websocket-py
e92960a475d3f1096a4ea41763cbc776b23f0a37
[ "MIT" ]
123
2017-06-19T05:34:58.000Z
2022-03-23T12:48:19.000Z
obswebsocket/requests.py
PanBartosz/obs-websocket-py
e92960a475d3f1096a4ea41763cbc776b23f0a37
[ "MIT" ]
67
2017-06-20T11:31:08.000Z
2022-03-25T20:30:41.000Z
obswebsocket/requests.py
PanBartosz/obs-websocket-py
e92960a475d3f1096a4ea41763cbc776b23f0a37
[ "MIT" ]
51
2017-10-09T19:03:12.000Z
2022-03-28T19:25:02.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #
# (Generated on 2020-12-20 18:26:33.661372)
#

from .base_classes import Baserequests
29.682412
417
0.601075
134494ea81ba3a532d6b1f91e17ecbbfebb61032
157
py
Python
simple_history/tests/custom_user/admin.py
rdurica/django-simple-history
84d17f40be68e9ac7744b773451be83720c4c13a
[ "BSD-3-Clause" ]
911
2015-01-05T13:21:17.000Z
2020-06-07T07:11:53.000Z
simple_history/tests/custom_user/admin.py
rdurica/django-simple-history
84d17f40be68e9ac7744b773451be83720c4c13a
[ "BSD-3-Clause" ]
492
2015-01-01T18:20:20.000Z
2020-06-06T17:34:01.000Z
simple_history/tests/custom_user/admin.py
rdurica/django-simple-history
84d17f40be68e9ac7744b773451be83720c4c13a
[ "BSD-3-Clause" ]
307
2015-02-01T00:45:51.000Z
2020-06-06T15:39:29.000Z
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin

from .models import CustomUser

admin.site.register(CustomUser, UserAdmin)
22.428571
47
0.834395
1345874b0ae4768978973e82f88c986754ca58f9
7,109
py
Python
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py
Pandinosaurus/open_model_zoo
2543996541346418919c5cddfb71e33e2cdef080
[ "Apache-2.0" ]
1
2019-05-31T14:01:42.000Z
2019-05-31T14:01:42.000Z
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py
Pandinosaurus/open_model_zoo
2543996541346418919c5cddfb71e33e2cdef080
[ "Apache-2.0" ]
null
null
null
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py
Pandinosaurus/open_model_zoo
2543996541346418919c5cddfb71e33e2cdef080
[ "Apache-2.0" ]
null
null
null
""" Copyright (c) 2018-2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import OrderedDict import cv2 import numpy as np from ...adapters import MTCNNPAdapter
39.494444
120
0.623435
134646519d68e17184a83f90eaeb23182da3950c
8,849
py
Python
pytests/Atomicity/basic_ops.py
ashwin2002/TAF
4223787a1f4c0fe9fa841543020b48ada9ade9e3
[ "Apache-2.0" ]
null
null
null
pytests/Atomicity/basic_ops.py
ashwin2002/TAF
4223787a1f4c0fe9fa841543020b48ada9ade9e3
[ "Apache-2.0" ]
null
null
null
pytests/Atomicity/basic_ops.py
ashwin2002/TAF
4223787a1f4c0fe9fa841543020b48ada9ade9e3
[ "Apache-2.0" ]
null
null
null
from Cb_constants import DocLoading
from basetestcase import ClusterSetup
from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator
from couchbase_helper.tuq_generators import JsonGenerator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient

from com.couchbase.client.java.json import JsonObject

"""
Basic test cases with commit,rollback scenarios
"""
42.138095
79
0.600181
1346bfc37ba0726e8df79447049dc235a411088d
509
py
Python
reverseWord.py
lovefov/Python
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
[ "MIT" ]
null
null
null
reverseWord.py
lovefov/Python
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
[ "MIT" ]
null
null
null
reverseWord.py
lovefov/Python
ba8fc49e6e503927dc1f827f37b77f3e43b5d0c8
[ "MIT" ]
1
2021-02-08T08:48:44.000Z
2021-02-08T08:48:44.000Z
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
#Author:
'''
2.xlen()list
3.x

def spin_words(sentence):
    # Your code goes here
    return " ".join([x[::-1] if len(x) >= 5 else x for x in sentence.split(" ")])

str[::-1] ok
'''
19.576923
81
0.569745
1346de71efda2a56a9fe39787dfce52620463eb1
3,461
py
Python
src/scs_host/comms/network_socket.py
south-coast-science/scs_host_cpc
08b4a28c022936462b60823cca136ba6746eac57
[ "MIT" ]
null
null
null
src/scs_host/comms/network_socket.py
south-coast-science/scs_host_cpc
08b4a28c022936462b60823cca136ba6746eac57
[ "MIT" ]
null
null
null
src/scs_host/comms/network_socket.py
south-coast-science/scs_host_cpc
08b4a28c022936462b60823cca136ba6746eac57
[ "MIT" ]
null
null
null
""" Created on 30 May 2017 @author: Bruno Beloff ([email protected]) A network socket abstraction, implementing ProcessComms """ import socket import time from scs_core.sys.process_comms import ProcessComms # --------------------------------------------------------------------------------------------------------------------
27.251969
118
0.428489
13470b2018f5f54dbcfea9b085e57cd30b1be672
15,182
py
Python
dateparser/date.py
JKhakpour/dateparser
7f324cfd3de04e91752979cf65ae0dedc622375f
[ "BSD-3-Clause" ]
2
2019-03-12T10:50:15.000Z
2021-07-07T14:38:58.000Z
dateparser/date.py
JKhakpour/dateparser
7f324cfd3de04e91752979cf65ae0dedc622375f
[ "BSD-3-Clause" ]
null
null
null
dateparser/date.py
JKhakpour/dateparser
7f324cfd3de04e91752979cf65ae0dedc622375f
[ "BSD-3-Clause" ]
1
2018-03-07T13:25:16.000Z
2018-03-07T13:25:16.000Z
# -*- coding: utf-8 -*-
import calendar
import collections
from datetime import datetime, timedelta
from warnings import warn

import six
import regex as re
from dateutil.relativedelta import relativedelta

from dateparser.date_parser import date_parser
from dateparser.freshness_date_parser import freshness_date_parser
from dateparser.languages.loader import LanguageDataLoader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import apply_settings
from dateparser.utils import normalize_unicode, apply_timezone_from_settings

APOSTROPHE_LOOK_ALIKE_CHARS = [
    u'\N{RIGHT SINGLE QUOTATION MARK}',   # u'\u2019'
    u'\N{MODIFIER LETTER APOSTROPHE}',    # u'\u02bc'
    u'\N{MODIFIER LETTER TURNED COMMA}',  # u'\u02bb'
    u'\N{ARMENIAN APOSTROPHE}',           # u'\u055a'
    u'\N{LATIN SMALL LETTER SALTILLO}',   # u'\ua78c'
    u'\N{PRIME}',                         # u'\u2032'
    u'\N{REVERSED PRIME}',                # u'\u2035'
    u'\N{MODIFIER LETTER PRIME}',         # u'\u02b9'
    u'\N{FULLWIDTH APOSTROPHE}',          # u'\uff07'
]

RE_NBSP = re.compile(u'\xa0', flags=re.UNICODE)
RE_SPACES = re.compile(r'\s+')
RE_TRIM_SPACES = re.compile(r'^\s+(\S.*?)\s+$')

RE_SANITIZE_SKIP = re.compile(r'\t|\n|\r|\u00bb|,\s\u0432|\u200e|\xb7|\u200f|\u064e|\u064f', flags=re.M)
RE_SANITIZE_RUSSIAN = re.compile(r'([\W\d])\u0433\.', flags=re.I | re.U)
RE_SANITIZE_AMPM = re.compile(r'\b([ap])(\.)?m(\.)?\b', flags=re.DOTALL | re.I)
RE_SANITIZE_ON = re.compile(r'^.*?on:\s+(.*)')
RE_SANITIZE_APOSTROPHE = re.compile(u'|'.join(APOSTROPHE_LOOK_ALIKE_CHARS))

RE_SEARCH_TIMESTAMP = re.compile(r'^\d{10}(?![^\d.])')


def sanitize_date(date_string):
    date_string = RE_SANITIZE_SKIP.sub(' ', date_string)
    date_string = RE_SANITIZE_RUSSIAN.sub(r'\1 ', date_string)  # remove u'.' (Russian for year) but not in words
    date_string = sanitize_spaces(date_string)
    date_string = RE_SANITIZE_AMPM.sub(r'\1m', date_string)
    date_string = RE_SANITIZE_ON.sub(r'\1', date_string)
    date_string = RE_SANITIZE_APOSTROPHE.sub(u"'", date_string)
    return date_string


def get_date_from_timestamp(date_string, settings):
    if RE_SEARCH_TIMESTAMP.search(date_string):
        date_obj = datetime.fromtimestamp(int(date_string[:10]))
        date_obj = apply_timezone_from_settings(date_obj, settings)
        return date_obj


def parse_with_formats(date_string, date_formats, settings):
    """
    Parse with formats and return a dictionary with 'period' and 'obj_date'.

    :returns: :class:`datetime.datetime`, dict or None
    """
    period = 'day'
    for date_format in date_formats:
        try:
            date_obj = datetime.strptime(date_string, date_format)
        except ValueError:
            continue
        else:
            # If format does not include the day, use last day of the month
            # instead of first, because the first is usually out of range.
            if '%d' not in date_format:
                period = 'month'
                date_obj = date_obj.replace(
                    day=get_last_day_of_month(date_obj.year, date_obj.month))

            if not ('%y' in date_format or '%Y' in date_format):
                today = datetime.today()
                date_obj = date_obj.replace(year=today.year)

            date_obj = apply_timezone_from_settings(date_obj, settings)

            return {'date_obj': date_obj, 'period': period}
    else:
        return {'date_obj': None, 'period': period}
38.338384
114
0.640364
1347a75f9a9bad0cfccf1c5a976700bd26f857d2
8,817
py
Python
src/models/functions/connection/mixture_density_network.py
kristofbc/handwriting-synthesis
16505e89fd7275d4cd3ed9c4388c9f3c153a0397
[ "FTL" ]
null
null
null
src/models/functions/connection/mixture_density_network.py
kristofbc/handwriting-synthesis
16505e89fd7275d4cd3ed9c4388c9f3c153a0397
[ "FTL" ]
null
null
null
src/models/functions/connection/mixture_density_network.py
kristofbc/handwriting-synthesis
16505e89fd7275d4cd3ed9c4388c9f3c153a0397
[ "FTL" ]
null
null
null
import chainer
import chainer.functions
from chainer.utils import type_check
from chainer import cuda
from chainer import function

import numpy as np

#from chainer import function_node
from utils import clip_grad


#class MixtureDensityNetworkFunction(function_node.FunctionNode):


def mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho):
    """ Mixture Density Network
        Output the coefficient params

        Args:
            x (Variable): Tensor containing the position [x1, x2, x3] to predict
            eos (Variable): End-of-stroke prediction
            pi (Variable): mixture components
            mu_x1 (Variable): mean of x1
            mu_x2 (Variable): mean of x2
            s_x1 (Variable): variance of x1
            s_x2 (Variable): variance of x2
            rho (Variable): correlation parameter

        Returns:
            loss (Variable)
            y (Variable)
            eos (Variable)
            pi (Variable)
            mu_x1 (Variable)
            mu_x2 (Variable)
            s_x1 (Variable)
            s_x2 (Variable)
            rho (Variable)
    """
    return MixtureDensityNetworkFunction()(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
40.260274
142
0.576613
134809d310a2c2bc00124d8b1a5104d5d2cb92b6
939
py
Python
flask__webservers/bootstrap_4__toggle_switch__examples/main.py
DazEB2/SimplePyScripts
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
[ "CC-BY-4.0" ]
null
null
null
flask__webservers/bootstrap_4__toggle_switch__examples/main.py
DazEB2/SimplePyScripts
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
[ "CC-BY-4.0" ]
null
null
null
flask__webservers/bootstrap_4__toggle_switch__examples/main.py
DazEB2/SimplePyScripts
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
[ "CC-BY-4.0" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


# SOURCE: https://github.com/twbs/bootstrap
# SOURCE: https://github.com/gitbrent/bootstrap4-toggle
# SOURCE: https://gitbrent.github.io/bootstrap4-toggle/


from flask import Flask, render_template

app = Flask(__name__)

import logging
logging.basicConfig(level=logging.DEBUG)


if __name__ == '__main__':
    app.debug = True

    # Localhost
    # port=0 -- random free port
    # app.run(port=0)
    app.run(
        port=5000,
        # :param threaded: should the process handle each request in a separate
        #                  thread?
        # :param processes: if greater than 1 then handle each request in a new process
        #                   up to this maximum number of concurrent processes.
        threaded=True,
    )

    # # Public IP
    # app.run(host='0.0.0.0')
22.357143
87
0.631523
134951456249066bc57415ee60860d0f10fe18d8
160
py
Python
dev/phonts/visualization/phonts.py
eragasa/pypospack
21cdecaf3b05c87acc532d992be2c04d85bfbc22
[ "MIT" ]
4
2018-01-18T19:59:56.000Z
2020-08-25T11:56:52.000Z
dev/phonts/visualization/phonts.py
eragasa/pypospack
21cdecaf3b05c87acc532d992be2c04d85bfbc22
[ "MIT" ]
1
2018-04-22T23:02:13.000Z
2018-04-22T23:02:13.000Z
dev/phonts/visualization/phonts.py
eragasa/pypospack
21cdecaf3b05c87acc532d992be2c04d85bfbc22
[ "MIT" ]
1
2019-09-14T07:04:42.000Z
2019-09-14T07:04:42.000Z
import pypospack.io.phonts as phonts

# <---- additional classes and functions in which to add top
# <---- pypospack.io.phonts

if __name__ == "__main__":
20
60
0.6875
13495c72390c53605e37531f81078eacc3f25cd2
30,078
py
Python
omegaconf/_utils.py
sugatoray/omegaconf
edf9e86493a14b0e909e956d9bae59b9861ef9c5
[ "BSD-3-Clause" ]
1,091
2018-09-06T17:27:12.000Z
2022-03-31T13:47:45.000Z
omegaconf/_utils.py
sugatoray/omegaconf
edf9e86493a14b0e909e956d9bae59b9861ef9c5
[ "BSD-3-Clause" ]
624
2019-06-11T20:53:19.000Z
2022-03-30T20:44:25.000Z
omegaconf/_utils.py
sugatoray/omegaconf
edf9e86493a14b0e909e956d9bae59b9861ef9c5
[ "BSD-3-Clause" ]
71
2019-06-14T05:32:45.000Z
2022-03-27T19:52:35.000Z
import copy
import os
import re
import string
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from textwrap import dedent
from typing import (
    Any, Dict, Iterator, List, Optional, Tuple, Type, Union, get_type_hints,
)

import yaml

from .errors import (
    ConfigIndexError, ConfigTypeError, ConfigValueError,
    GrammarParseError, OmegaConfBaseException, ValidationError,
)
from .grammar_parser import SIMPLE_INTERPOLATION_PATTERN, parse

try:
    import dataclasses
except ImportError:  # pragma: no cover
    dataclasses = None  # type: ignore # pragma: no cover

try:
    import attr
except ImportError:  # pragma: no cover
    attr = None  # type: ignore # pragma: no cover


# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposedly *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
KEY_PATH_HEAD = re.compile(r"(\.)*[^.[]*")
# Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
#   - `.` followed by anything except `.` or `[` (ex: .b, .d)
#   - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER = re.compile(r"\.([^.[]*)|\[(.*?)\]")

# source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES = [
    "y", "Y", "yes", "Yes", "YES",
    "n", "N", "no", "No", "NO",
    "true", "True", "TRUE",
    "false", "False", "FALSE",
    "on", "On", "ON",
    "off", "Off", "OFF",
]

# To be used as default value when `None` is not an option.
_DEFAULT_MARKER_: Any = Marker("_DEFAULT_MARKER_")


def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
    """Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
    if getattr(type_, "__origin__", None) is Union:
        args = type_.__args__
        if len(args) == 2 and args[1] == type(None):  # noqa E721
            return True, args[0]
    if type_ is Any:
        return True, Any
    return False, type_


def _is_optional(obj: Any, key: Optional[Union[int, str]] = None) -> bool:
    """Check `obj` metadata to see if the given node is optional."""
    from .base import Container, Node

    if key is not None:
        assert isinstance(obj, Container)
        obj = obj._get_node(key)
    if isinstance(obj, Node):
        return obj._is_optional()
    else:
        # In case `obj` is not a Node, treat it as optional by default.
        # This is used in `ListConfig.append` and `ListConfig.insert`
        # where the appended/inserted value might or might not be a Node.
        return True


def extract_dict_subclass_data(obj: Any, parent: Any) -> Optional[Dict[str, Any]]:
    """Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""
    from omegaconf.omegaconf import _maybe_wrap

    is_type = isinstance(obj, type)
    obj_type = obj if is_type else type(obj)
    subclasses_dict = is_dict_subclass(obj_type)

    if subclasses_dict:
        warnings.warn(
            f"Class `{obj_type.__name__}` subclasses `Dict`."
            + " Subclassing `Dict` in Structured Config classes is deprecated,"
            + " see github.com/omry/omegaconf/issues/663",
            UserWarning,
            stacklevel=9,
        )

    if is_type:
        return None
    elif subclasses_dict:
        dict_subclass_data = {}
        key_type, element_type = get_dict_key_value_types(obj_type)
        for name, value in obj.items():
            is_optional, type_ = _resolve_optional(element_type)
            type_ = _resolve_forward(type_, obj.__module__)
            try:
                dict_subclass_data[name] = _maybe_wrap(
                    ref_type=type_,
                    is_optional=is_optional,
                    key=name,
                    value=value,
                    parent=parent,
                )
            except ValidationError as ex:
                format_and_raise(
                    node=None, key=name, value=value, cause=ex, msg=str(ex)
                )
        return dict_subclass_data
    else:
        return None


def get_value_kind(
    value: Any, strict_interpolation_validation: bool = False
) -> ValueKind:
    """
    Determine the kind of a value
    Examples:
    VALUE: "10", "20", True
    MANDATORY_MISSING: "???"
    INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
                   "ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"

    :param value: Input to classify.
    :param strict_interpolation_validation: If `True`, then when `value` is a string
        containing "${", it is parsed to validate the interpolation syntax. If `False`,
        this parsing step is skipped: this is more efficient, but will not detect errors.
    """

    if _is_missing_value(value):
        return ValueKind.MANDATORY_MISSING

    value = _get_value(value)

    # We identify potential interpolations by the presence of "${" in the string.
    # Note that escaped interpolations (ex: "esc: \${bar}") are identified as
    # interpolations: this is intended, since they must be processed as interpolations
    # for the string to be properly un-escaped.
    # Keep in mind that invalid interpolations will only be detected when
    # `strict_interpolation_validation` is True.
    if isinstance(value, str) and "${" in value:
        if strict_interpolation_validation:
            # First try the cheap regex matching that detects common interpolations.
            if SIMPLE_INTERPOLATION_PATTERN.match(value) is None:
                # If no match, do the more expensive grammar parsing to detect errors.
                parse(value)
        return ValueKind.INTERPOLATION
    else:
        return ValueKind.VALUE


# DEPRECATED: remove in 2.2
# DEPRECATED: remove in 2.2


def is_generic_list(type_: Any) -> bool:
    """
    Checks if a type is a generic list, for example:
    list returns False
    typing.List returns False
    typing.List[T] returns True

    :param type_: variable type
    :return: bool
    """
    return is_list_annotation(type_) and get_list_element_type(type_) is not None


def is_generic_dict(type_: Any) -> bool:
    """
    Checks if a type is a generic dict, for example:
    list returns False
    typing.List returns False
    typing.List[T] returns True

    :param type_: variable type
    :return: bool
    """
    return is_dict_annotation(type_) and len(get_dict_key_value_types(type_)) > 0


def split_key(key: str) -> List[str]:
    """
    Split a full key path into its individual components.
    This is similar to `key.split(".")` but also works with the getitem syntax:
      "a.b"       -> ["a", "b"]
      "a[b]"      -> ["a, "b"]
      ".a.b[c].d" -> ["", "a", "b", "c", "d"]
      "[a].b"     -> ["a", "b"]
    """
    # Obtain the first part of the key (in docstring examples: a, a, .a, '')
    first = KEY_PATH_HEAD.match(key)
    assert first is not None
    first_stop = first.span()[1]

    # `tokens` will contain all elements composing the key.
    tokens = key[0:first_stop].split(".")

    # Optimization in case `key` has no other component: we are done.
    if first_stop == len(key):
        return tokens

    if key[first_stop] == "[" and not tokens[-1]:
        # This is a special case where the first key starts with brackets, e.g.
        # [a] or ..[a]. In that case there is an extra "" in `tokens` that we
        # need to get rid of:
        #   [a]   -> tokens = [""] but we would like []
        #   ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
        tokens.pop()

    # Identify other key elements (in docstring examples: b, b, b/c/d, b)
    others = KEY_PATH_OTHER.findall(key[first_stop:])

    # There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
    # with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
    # Only one group can be non-empty.
    tokens += [dot_key if dot_key else bracket_key for dot_key, bracket_key in others]

    return tokens


# Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
30.849231
109
0.619423
134ae941abed7aefa64cace4d2b745626ee1b2ee
28,935
py
Python
darc/amber_clustering.py
loostrum/darc
977f43652ff4fc873340d09ac0fddeb81b889541
[ "Apache-2.0" ]
null
null
null
darc/amber_clustering.py
loostrum/darc
977f43652ff4fc873340d09ac0fddeb81b889541
[ "Apache-2.0" ]
47
2019-08-27T08:07:06.000Z
2022-03-04T10:10:40.000Z
darc/amber_clustering.py
loostrum/darc
977f43652ff4fc873340d09ac0fddeb81b889541
[ "Apache-2.0" ]
1
2020-11-24T09:27:56.000Z
2020-11-24T09:27:56.000Z
#!/usr/bin/env python3
#
# AMBER Clustering

import os
from time import sleep
import yaml
import ast
import threading
import multiprocessing as mp
import numpy as np
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.coordinates import SkyCoord

from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer
from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT
from darc.external import tools
from darc import util
44.930124
118
0.55718
134b9c14653c7fb4e1904d66229452a0dbe85152
9,811
py
Python
tools/load_demo_data.py
glenn2763/skyportal
79dc11bfe08076d9c1f920bad85681ab001e22c8
[ "BSD-3-Clause" ]
null
null
null
tools/load_demo_data.py
glenn2763/skyportal
79dc11bfe08076d9c1f920bad85681ab001e22c8
[ "BSD-3-Clause" ]
null
null
null
tools/load_demo_data.py
glenn2763/skyportal
79dc11bfe08076d9c1f920bad85681ab001e22c8
[ "BSD-3-Clause" ]
null
null
null
import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import pandas as pd
import signal
import requests

from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability

if __name__ == "__main__":
    """Insert test data"""
    env, cfg = load_env()
    basedir = Path(os.path.dirname(__file__)) / ".."

    with status(f"Connecting to database {cfg['database']['database']}"):
        init_db(**cfg["database"])

    with status("Dropping all tables"):
        drop_tables()

    with status("Creating tables"):
        create_tables()

    for model in Base.metadata.tables:
        print("    -", model)

    with status(f"Creating permissions"):
        setup_permissions()

    with status(f"Creating dummy users"):
        super_admin_user = User(
            username="[email protected]", role_ids=["Super admin"]
        )
        group_admin_user = User(
            username="[email protected]", role_ids=["Super admin"]
        )
        full_user = User(username="[email protected]", role_ids=["Full user"])
        view_only_user = User(
            username="[email protected]", role_ids=["View only"]
        )
        DBSession().add_all(
            [super_admin_user, group_admin_user, full_user, view_only_user]
        )

        for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
            DBSession().add(
                TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
            )

    with status("Creating token"):
        token = create_token(
            [
                "Manage groups",
                "Manage sources",
                "Upload data",
                "Comment",
                "Manage users",
            ],
            super_admin_user.id,
            "load_demo_data token",
        )

    with status("Launching web app & executing API calls"):
        try:
            response_status, data = api("GET", "sysinfo", token=token)
            app_already_running = True
        except requests.ConnectionError:
            app_already_running = False
            web_client = subprocess.Popen(
                ["make", "run"], cwd=basedir, preexec_fn=os.setsid
            )

        server_url = f"http://localhost:{cfg['ports.app']}"
        print()
        print(f"Waiting for server to appear at {server_url}...")

        try:
            verify_server_availability(server_url)
            print("App running - continuing with API calls")

            with status("Creating dummy group & adding users"):
                data = assert_post(
                    "groups",
                    data={
                        "name": "Stream A",
                        "group_admins": [
                            super_admin_user.username,
                            group_admin_user.username,
                        ],
                    },
                )
                group_id = data["data"]["id"]
                for u in [view_only_user, full_user]:
                    data = assert_post(
                        f"groups/{group_id}/users/{u.username}", data={"admin": False}
                    )

            with status("Creating dummy instruments"):
                data = assert_post(
                    "telescope",
                    data={
                        "name": "Palomar 1.5m",
                        "nickname": "P60",
                        "lat": 33.3633675,
                        "lon": -116.8361345,
                        "elevation": 1870,
                        "diameter": 1.5,
                        "group_ids": [group_id],
                    },
                )
                telescope1_id = data["data"]["id"]

                data = assert_post(
                    "instrument",
                    data={
                        "name": "P60 Camera",
                        "type": "phot",
                        "band": "optical",
                        "telescope_id": telescope1_id,
                    },
                )
                instrument1_id = data["data"]["id"]

                data = assert_post(
                    "telescope",
                    data={
                        "name": "Nordic Optical Telescope",
                        "nickname": "NOT",
                        "lat": 28.75,
                        "lon": 17.88,
                        "elevation": 1870,
                        "diameter": 2.56,
                        "group_ids": [group_id],
                    },
                )
                telescope2_id = data["data"]["id"]

                data = assert_post(
                    "instrument",
                    data={
                        "name": "ALFOSC",
                        "type": "both",
                        "band": "optical",
                        "telescope_id": telescope2_id,
                    },
                )

            with status("Creating dummy sources"):
                SOURCES = [
                    {
                        "id": "14gqr",
                        "ra": 353.36647,
                        "dec": 33.646149,
                        "redshift": 0.063,
                        "group_ids": [group_id],
                        "comments": [
                            "No source at transient location to R>26 in LRIS imaging",
                            "Strong calcium lines have emerged.",
                        ],
                    },
                    {
                        "id": "16fil",
                        "ra": 322.718872,
                        "dec": 27.574113,
                        "redshift": 0.0,
                        "group_ids": [group_id],
                        "comments": ["Frogs in the pond", "The eagle has landed"],
                    },
                ]

                (basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)

                for source_info in SOURCES:
                    comments = source_info.pop("comments")
                    data = assert_post("sources", data=source_info)
                    assert data["data"]["id"] == source_info["id"]

                    for comment in comments:
                        data = assert_post(
                            "comment",
                            data={"source_id": source_info["id"], "text": comment},
                        )

                    phot_file = basedir / "skyportal/tests/data/phot.csv"
                    phot_data = pd.read_csv(phot_file)

                    data = assert_post(
                        "photometry",
                        data={
                            "source_id": source_info["id"],
                            "time_format": "iso",
                            "time_scale": "utc",
                            "instrument_id": instrument1_id,
                            "observed_at": phot_data.observed_at.tolist(),
                            "mag": phot_data.mag.tolist(),
                            "e_mag": phot_data.e_mag.tolist(),
                            "lim_mag": phot_data.lim_mag.tolist(),
                            "filter": phot_data["filter"].tolist(),
                        },
                    )

                    spec_file = os.path.join(
                        os.path.dirname(os.path.dirname(__file__)),
                        "skyportal",
                        "tests",
                        "data",
                        "spec.csv",
                    )
                    spec_data = pd.read_csv(spec_file)
                    for i, df in spec_data.groupby("instrument_id"):
                        data = assert_post(
                            "spectrum",
                            data={
                                "source_id": source_info["id"],
                                "observed_at": str(datetime.datetime(2014, 10, 24)),
                                "instrument_id": 1,
                                "wavelengths": df.wavelength.tolist(),
                                "fluxes": df.flux.tolist(),
                            },
                        )

                    for ttype in ["new", "ref", "sub"]:
                        fname = f'{source_info["id"]}_{ttype}.png'
                        fpath = basedir / f"skyportal/tests/data/{fname}"
                        thumbnail_data = base64.b64encode(
                            open(os.path.abspath(fpath), "rb").read()
                        )
                        data = assert_post(
                            "thumbnail",
                            data={
                                "source_id": source_info["id"],
                                "data": thumbnail_data,
                                "ttype": ttype,
                            },
                        )

                    source = Source.query.get(source_info["id"])
                    source.add_linked_thumbnails()
        finally:
            if not app_already_running:
                print("Terminating web app")
                os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
37.446565
88
0.428804
134f9288b26ce3d17a1dc1a42f04cbaea4914dea
1,537
py
Python
framework/Exploits/CUTEFLOW_0024.py
UncleWillis/BugBox
25682f25fc3222db383649a4924bcd65f2ddcb34
[ "BSD-3-Clause" ]
1
2019-01-25T21:32:42.000Z
2019-01-25T21:32:42.000Z
framework/Exploits/CUTEFLOW_0024.py
UncleWillis/BugBox
25682f25fc3222db383649a4924bcd65f2ddcb34
[ "BSD-3-Clause" ]
null
null
null
framework/Exploits/CUTEFLOW_0024.py
UncleWillis/BugBox
25682f25fc3222db383649a4924bcd65f2ddcb34
[ "BSD-3-Clause" ]
1
2021-06-23T04:44:25.000Z
2021-06-23T04:44:25.000Z
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.

import sys
import os
import time

from selenium.common.exceptions import NoAlertPresentException

import framework
29
134
0.582954
134fbec769aed9e0795c724b3dcc54286150a284
10,720
py
Python
telethon/tl/custom/button.py
HosseyNJF/Telethon
0b0a1dc6a1a3f2fc8593526549889fba2884e8b8
[ "MIT" ]
4
2020-11-28T08:50:07.000Z
2020-12-13T03:44:05.000Z
telethon/tl/custom/button.py
HosseyNJF/Telethon
0b0a1dc6a1a3f2fc8593526549889fba2884e8b8
[ "MIT" ]
4
2020-10-11T15:40:17.000Z
2020-10-22T09:06:58.000Z
telethon/tl/custom/button.py
HosseyNJF/Telethon
0b0a1dc6a1a3f2fc8593526549889fba2884e8b8
[ "MIT" ]
2
2020-01-16T12:21:02.000Z
2021-12-16T01:30:11.000Z
from .. import types
from ... import utils
40.916031
79
0.638619
134fdc98faac6c7e555a1d8a47d4c15e48a09ce5
1,575
py
Python
src/main/resources/pys/join.py
addUsername/javaBoring
d576adbd21447085f56719e8cc871faf94d8a369
[ "MIT" ]
null
null
null
src/main/resources/pys/join.py
addUsername/javaBoring
d576adbd21447085f56719e8cc871faf94d8a369
[ "MIT" ]
null
null
null
src/main/resources/pys/join.py
addUsername/javaBoring
d576adbd21447085f56719e8cc871faf94d8a369
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 20:14:22 2020
Simple script to join json files
@author: SERGI
"""

import json
import sys
import os

if __name__ == "__main__":
    print("hello from python", flush=True)
    jsonPath = str(sys.argv[1])
    # =============================================================================
    #     jsonPath = "../eclipse-workspace/prueba/target/json/"
    # =============================================================================
    jsonPathTemp = jsonPath+"temp/"

    arr = os.listdir(jsonPathTemp)
    arr.sort()
    print(arr)

    dict_to_json = {}
    dict_0 = readJson(jsonPathTemp + arr[0])
    dict_1 = readJson(jsonPathTemp + arr[1])
    dict_2 = readJson(jsonPathTemp + arr[2])
    dict_3 = readJson(jsonPathTemp + arr[3])

    keys = [name for name in dict_0.keys() if "0" not in name]

    for key in keys:
        dict_to_json[key] = dict_0[key] + dict_1[key] + dict_2[key] + dict_3[key]

    #0seg,f_step,f_stop
    seg = dict_0['0seg,f_step,f_stop'][0]
    step = dict_0['0seg,f_step,f_stop'][1]
    stop = dict_3['0seg,f_step,f_stop'][2]
    dict_to_json['0seg,f_step,f_stop'] = [seg, step, stop]

    print("Escribiendo json: ", jsonPath+arr[0], flush=True)
    writeJson(jsonPath+arr[0], dict_to_json)
    print("finish", flush=True)
28.125
82
0.533333
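The record above calls readJson and writeJson, which are not defined in the shown content. A minimal sketch of what they plausibly are (hypothetical reconstructions):

    import json

    def readJson(path):
        # Load a JSON file into a dict.
        with open(path, encoding="utf-8") as fh:
            return json.load(fh)

    def writeJson(path, data):
        # Dump a dict to a JSON file.
        with open(path, "w", encoding="utf-8") as fh:
            json.dump(data, fh)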
1351156463c6a25e82b73e57ee20368b3d832d46
2,017
py
Python
app/grandchallenge/challenges/migrations/0023_auto_20200123_1102.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
1
2021-02-09T10:30:44.000Z
2021-02-09T10:30:44.000Z
app/grandchallenge/challenges/migrations/0023_auto_20200123_1102.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
null
null
null
app/grandchallenge/challenges/migrations/0023_auto_20200123_1102.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
null
null
null
# Generated by Django 3.0.2 on 2020-01-23 11:02

import re

import django.contrib.postgres.fields.citext
import django.core.validators
from django.db import migrations

import grandchallenge.challenges.models
36.017857
103
0.543877
13513f9e059c5209134cdb28c07c7e40eb9e7c97
8,756
py
Python
autosk_dev_test/component/LinReg.py
hmendozap/master-arbeit-files
5c1b90bc4a424313234b84bad405799de6f8d2ed
[ "MIT" ]
2
2018-01-18T06:25:21.000Z
2018-12-11T07:43:09.000Z
autosk_dev_test/component/LinReg.py
hmendozap/master-arbeit-files
5c1b90bc4a424313234b84bad405799de6f8d2ed
[ "MIT" ]
1
2016-03-29T07:55:18.000Z
2016-03-29T07:55:18.000Z
autosk_dev_test/component/LinReg.py
hmendozap/master-arbeit-files
5c1b90bc4a424313234b84bad405799de6f8d2ed
[ "MIT" ]
null
null
null
import numpy as np
import scipy.sparse as sp

from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter, CategoricalHyperparameter, Constant

from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
44.222222
102
0.489721
13526b6d17cc702b79611d77af35c9e1a3a2bf4e
278
py
Python
python_work/Chapter5/exe3_alien_color.py
Elektra-2/python_crash_course_2nd
1c8beaddfe037faa3a36e7c384a6ea2f9d560060
[ "MIT" ]
1
2020-08-25T18:42:30.000Z
2020-08-25T18:42:30.000Z
python_work/Chapter5/exe3_alien_color.py
Elektra-2/python_crash_course_2nd
1c8beaddfe037faa3a36e7c384a6ea2f9d560060
[ "MIT" ]
null
null
null
python_work/Chapter5/exe3_alien_color.py
Elektra-2/python_crash_course_2nd
1c8beaddfe037faa3a36e7c384a6ea2f9d560060
[ "MIT" ]
null
null
null
# Creating a elif chain

alien_color = 'red'

if alien_color == 'green':
    print('Congratulations! You won 5 points!')
elif alien_color == 'yellow':
    print('Congratulations! You won 10 points!')
elif alien_color == 'red':
    print('Congratulations! You won 15 points!')
25.272727
48
0.683453
13532d8edfc3e8c0a315f5cb2ba2e9ad01f479b5
2,427
py
Python
DigiPsych_API/Data_Science_API/evaluate_model.py
larryzhang95/Voice-Analysis-Pipeline
264ac5c70d0baab47b81718ea5b895be30a683e9
[ "Apache-2.0" ]
7
2019-06-22T21:03:50.000Z
2021-11-21T19:46:55.000Z
DigiPsych_API/Data_Science_API/evaluate_model.py
larryzhang95/Voice-Analysis-Pipeline
264ac5c70d0baab47b81718ea5b895be30a683e9
[ "Apache-2.0" ]
null
null
null
DigiPsych_API/Data_Science_API/evaluate_model.py
larryzhang95/Voice-Analysis-Pipeline
264ac5c70d0baab47b81718ea5b895be30a683e9
[ "Apache-2.0" ]
3
2019-09-15T01:50:39.000Z
2021-12-22T02:36:36.000Z
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve

# Plot learning curve

# Plot validation curve
42.578947
115
0.674083
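The record above retains only imports and two placeholder comments; the plotting functions themselves were dropped. A minimal sketch of a learning-curve plot built on the imported sklearn.model_selection.learning_curve (an illustration, not the original code):

    def plot_learning_curve(estimator, X, y, cv=5):
        # Compute train/validation scores over increasing training-set sizes.
        sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv)
        plt.plot(sizes, train_scores.mean(axis=1), "o-", label="training score")
        plt.plot(sizes, test_scores.mean(axis=1), "o-", label="cross-validation score")
        plt.xlabel("training examples")
        plt.ylabel("score")
        plt.legend()
        plt.show()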
13539ceb0c44529e049d271d3d684efaaf1e9dee
121
py
Python
oomi/__init__.py
sremes/oomi
312317aa2ef68f1481b2447652a7d47c5f2e3f56
[ "MIT" ]
null
null
null
oomi/__init__.py
sremes/oomi
312317aa2ef68f1481b2447652a7d47c5f2e3f56
[ "MIT" ]
null
null
null
oomi/__init__.py
sremes/oomi
312317aa2ef68f1481b2447652a7d47c5f2e3f56
[ "MIT" ]
null
null
null
"""Utilities for downloading comsumption data from Oomi.""" from oomi.oomi_downloader import OomiDownloader, OomiConfig
30.25
59
0.818182
1354ab456ba9280a4363560bf56997305ed54a5f
3,692
py
Python
BaseTools/Source/Python/UPT/Object/Parser/InfMisc.py
KaoTuz/edk2-stable202108
49d9306e7bf64b2f07d8473be1f2faea49d0a012
[ "Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "BSD-2-Clause-Patent", "BSD-3-Clause" ]
9
2021-07-26T17:02:51.000Z
2021-12-30T10:49:46.000Z
BaseTools/Source/Python/UPT/Object/Parser/InfMisc.py
ESdove/edk2_exploring
34ff32b45f43d233d9696e7c8e3de68ea3000a7b
[ "Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "BSD-2-Clause-Patent", "BSD-3-Clause" ]
null
null
null
BaseTools/Source/Python/UPT/Object/Parser/InfMisc.py
ESdove/edk2_exploring
34ff32b45f43d233d9696e7c8e3de68ea3000a7b
[ "Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "BSD-2-Clause-Patent", "BSD-3-Clause" ]
null
null
null
## @file
# This file is used to define class objects of INF file miscellaneous.
# Include BootMode/HOB/Event and others. It will consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent

'''
InfMisc
'''

import Logger.Log as Logger
from Logger import ToolError

from Library import DataType as DT
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Library.Misc import Sdict

##
# BootModeObject
#

##
# EventObject
#

##
# HobObject
#

##
# InfSpecialCommentObject
#

## ErrorInInf
#
# An encapsulate of Error for INF parser.
#
def ErrorInInf(Message=None, ErrorCode=None, LineInfo=None, RaiseError=True):
    if ErrorCode is None:
        ErrorCode = ToolError.FORMAT_INVALID
    if LineInfo is None:
        LineInfo = ['', -1, '']
    Logger.Error("InfParser",
                 ErrorCode,
                 Message=Message,
                 File=LineInfo[0],
                 Line=LineInfo[1],
                 ExtraData=LineInfo[2],
                 RaiseError=RaiseError)
25.818182
77
0.628115
13570cde1e5c8c95a0a1cd4eb53d4d9f0d94d653
297
py
Python
21-08/Starters8/1.py
allenalvin333/Codechef_Competitions
44c3626de33cd9e17d1acfc74abe0aab809efbad
[ "MIT" ]
null
null
null
21-08/Starters8/1.py
allenalvin333/Codechef_Competitions
44c3626de33cd9e17d1acfc74abe0aab809efbad
[ "MIT" ]
null
null
null
21-08/Starters8/1.py
allenalvin333/Codechef_Competitions
44c3626de33cd9e17d1acfc74abe0aab809efbad
[ "MIT" ]
null
null
null
# https://www.codechef.com/START8C/problems/PENALTY

for T in range(int(input())):
    n=list(map(int,input().split()))
    a=b=0
    for i in range(len(n)):
        if(n[i]==1):
            if(i%2==0):
                a+=1
            else:
                b+=1
    if(a>b):
        print(1)
    elif(b>a):
        print(2)
    else:
        print(0)
24.75
51
0.501684
13583e5f15c53f390db50be7afda1b1e9f5ec33e
859
py
Python
util/eval.py
jhong93/vpd
1ed3e8631c46e078ecb9a7756dba1f1c14aead5b
[ "BSD-3-Clause" ]
7
2021-11-26T01:15:23.000Z
2022-03-15T10:51:47.000Z
util/eval.py
jhong93/vpd
1ed3e8631c46e078ecb9a7756dba1f1c14aead5b
[ "BSD-3-Clause" ]
4
2022-01-15T09:46:00.000Z
2022-02-05T07:10:18.000Z
util/eval.py
jhong93/vpd
1ed3e8631c46e078ecb9a7756dba1f1c14aead5b
[ "BSD-3-Clause" ]
1
2021-09-18T16:50:14.000Z
2021-09-18T16:50:14.000Z
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
35.791667
74
0.690338
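This record likewise retains only imports. A minimal confusion-matrix plot using exactly those imports (a sketch, not the dropped original):

    def plot_confusion_matrix(y_true, y_pred, labels=None):
        # Compute the matrix, then render it with sklearn's display helper.
        cm = confusion_matrix(y_true, y_pred, labels=labels)
        ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels).plot()
        plt.show()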
135906a75c8644d17899cd5046a70589324b71a2
3,595
py
Python
python/py3study/pytorch-lab/demo-cifar.py
sillyemperor/langstudy
937a11d97984e10e4ead54f3b7b7d6a1f2ef24a1
[ "MIT" ]
null
null
null
python/py3study/pytorch-lab/demo-cifar.py
sillyemperor/langstudy
937a11d97984e10e4ead54f3b7b7d6a1f2ef24a1
[ "MIT" ]
null
null
null
python/py3study/pytorch-lab/demo-cifar.py
sillyemperor/langstudy
937a11d97984e10e4ead54f3b7b7d6a1f2ef24a1
[ "MIT" ]
null
null
null
import torch
import torchvision
import torchvision.transforms as transforms
import os.path

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

root = os.path.join(BASE_DIR, '../data/')
trainset = torchvision.datasets.CIFAR10(root=root, train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root=root, train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         shuffle=False, num_workers=2)

import torch.nn as nn
import torch.nn.functional as F

# torch.Size([1, 3, 32, 32])
# torch.Size([1, 6, 14, 14])
# torch.Size([1, 16, 5, 5])
# torch.Size([1, 400])
# torch.Size([1, 120])
# torch.Size([1, 84])
# torch.Size([1, 100])

model = Net()

import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)

from util import train_eval

train_eval(model, criterion, trainloader, testloader, optimizer, epochs=5)

# [1, 5000] loss: 2.293
# [1, 10000] loss: 2.075
# [1, 15000] loss: 1.876
# [1, 20000] loss: 1.754
# [1, 25000] loss: 1.658
# [1, 30000] loss: 1.625
# [1, 35000] loss: 1.558
# [1, 40000] loss: 1.520
# [1, 45000] loss: 1.494
# [1, 50000] loss: 1.459
# 1/5 4456/10000 44.56% (107.18255376815796s)
# [2, 5000] loss: 1.413
# [2, 10000] loss: 1.398
# [2, 15000] loss: 1.386
# [2, 20000] loss: 1.379
# [2, 25000] loss: 1.358
# [2, 30000] loss: 1.324
# [2, 35000] loss: 1.333
# [2, 40000] loss: 1.280
# [2, 45000] loss: 1.296
# [2, 50000] loss: 1.304
# 2/5 5357/10000 53.56999999999999% (105.8866639137268s)
# [3, 5000] loss: 1.226
# [3, 10000] loss: 1.231
# [3, 15000] loss: 1.215
# [3, 20000] loss: 1.235
# [3, 25000] loss: 1.199
# [3, 30000] loss: 1.187
# [3, 35000] loss: 1.192
# [3, 40000] loss: 1.194
# [3, 45000] loss: 1.196
# [3, 50000] loss: 1.191
# 3/5 5729/10000 57.29% (105.63971090316772s)
# [4, 5000] loss: 1.117
# [4, 10000] loss: 1.096
# [4, 15000] loss: 1.121
# [4, 20000] loss: 1.123
# [4, 25000] loss: 1.107
# [4, 30000] loss: 1.120
# [4, 35000] loss: 1.124
# [4, 40000] loss: 1.094
# [4, 45000] loss: 1.105
# [4, 50000] loss: 1.102
# 4/5 5829/10000 58.29% (112.56915497779846s)
# [5, 5000] loss: 1.034
# [5, 10000] loss: 1.024
# [5, 15000] loss: 1.040
# [5, 20000] loss: 1.027
# [5, 25000] loss: 1.043
# [5, 30000] loss: 1.049
# [5, 35000] loss: 1.024
# [5, 40000] loss: 1.042
# [5, 45000] loss: 1.027
# [5, 50000] loss: 1.027
# 5/5 6178/10000 61.78% (109.75669193267822s)
# 61.0% (541.0347754955292s)
28.307087
75
0.585535
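The record above instantiates Net() without its definition; only a trace of intermediate shapes survives as comments. A sketch that reproduces those shapes (note the recorded trace ends at [1, 100] even though CIFAR-10 has 10 classes; training still runs because targets 0-9 index valid logits):

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 6, 5)
            self.pool = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(6, 16, 5)
            self.fc1 = nn.Linear(16 * 5 * 5, 120)
            self.fc2 = nn.Linear(120, 84)
            self.fc3 = nn.Linear(84, 100)  # 100 outputs, matching the recorded trace

        def forward(self, x):
            x = self.pool(F.relu(self.conv1(x)))  # [N, 6, 14, 14]
            x = self.pool(F.relu(self.conv2(x)))  # [N, 16, 5, 5]
            x = x.view(x.size(0), -1)             # [N, 400]
            x = F.relu(self.fc1(x))               # [N, 120]
            x = F.relu(self.fc2(x))               # [N, 84]
            return self.fc3(x)                    # [N, 100]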
135933f07f224fa858e30bebe4b7db897823355d
995
py
Python
astroquery/neodys/tests/test_neodys_remote.py
B612-Asteroid-Institute/astroquery
4bc8002639e80f7356306f4e000334da5e086091
[ "BSD-3-Clause" ]
null
null
null
astroquery/neodys/tests/test_neodys_remote.py
B612-Asteroid-Institute/astroquery
4bc8002639e80f7356306f4e000334da5e086091
[ "BSD-3-Clause" ]
1
2021-03-19T14:06:50.000Z
2021-03-19T14:06:50.000Z
astroquery/neodys/tests/test_neodys_remote.py
B612-Asteroid-Institute/astroquery
4bc8002639e80f7356306f4e000334da5e086091
[ "BSD-3-Clause" ]
null
null
null
from ... import neodys
41.458333
79
0.711558
1359a8c4afe9581f59876f936fb68313f28865c1
1,028
py
Python
corehq/apps/accounting/migrations/0026_auto_20180508_1956.py
kkrampa/commcare-hq
d64d7cad98b240325ad669ccc7effb07721b4d44
[ "BSD-3-Clause" ]
1
2020-05-05T13:10:01.000Z
2020-05-05T13:10:01.000Z
corehq/apps/accounting/migrations/0026_auto_20180508_1956.py
kkrampa/commcare-hq
d64d7cad98b240325ad669ccc7effb07721b4d44
[ "BSD-3-Clause" ]
1
2019-12-09T14:00:14.000Z
2019-12-09T14:00:14.000Z
corehq/apps/accounting/migrations/0026_auto_20180508_1956.py
MaciejChoromanski/commcare-hq
fd7f65362d56d73b75a2c20d2afeabbc70876867
[ "BSD-3-Clause" ]
5
2015-11-30T13:12:45.000Z
2019-07-01T19:27:07.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-08 19:56
from __future__ import unicode_literals
from __future__ import absolute_import

from django.db import migrations
27.783784
80
0.689689
1359c0fa6a8b2dda6889c4d23c5bb6bc6ad1f0c0
9,806
py
Python
tensor2tensor/rl/evaluator.py
SouBanerjee/tensor2tensor
8b88b13dd65bf52b3c27663a128adb7b0a5773fb
[ "Apache-2.0" ]
1
2019-12-11T14:43:49.000Z
2019-12-11T14:43:49.000Z
tensor2tensor/rl/evaluator.py
SouBanerjee/tensor2tensor
8b88b13dd65bf52b3c27663a128adb7b0a5773fb
[ "Apache-2.0" ]
null
null
null
tensor2tensor/rl/evaluator.py
SouBanerjee/tensor2tensor
8b88b13dd65bf52b3c27663a128adb7b0a5773fb
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Evaluation script for RL agents.

Example invocation:

python -m tensor2tensor.rl.evaluator \
    --policy_dir=$HOME/t2t/rl_v1/policy \
    --eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \
    --hparams_set=rlmb_base \
    --hparams='batch_size=64'
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import datetime
import os

from tensor2tensor.data_generators import gym_env
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl  # pylint: disable=unused-import
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params  # pylint: disable=unused-import
from tensor2tensor.utils import flags as t2t_flags  # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib

import tensorflow as tf


flags = tf.flags
FLAGS = flags.FLAGS

flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.")
flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.")
flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.")
flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.")
flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.")
flags.DEFINE_string(
    "eval_metrics_dir", "", "Directory to output the eval metrics at."
)
flags.DEFINE_bool("full_eval", True, "Whether to ignore the timestep limit.")
flags.DEFINE_enum(
    "agent", "policy", ["random", "policy", "planner"], "Agent type to use."
)
flags.DEFINE_bool(
    "eval_with_learner", True,
    "Whether to use the PolicyLearner.evaluate function instead of an "
    "out-of-graph one. Works only with --agent=policy."
)
flags.DEFINE_string(
    "planner_hparams_set", "planner_small", "Planner hparam set."
)
flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.")
flags.DEFINE_integer(
    "log_every_steps", 20, "Log every how many environment steps."
)
flags.DEFINE_string(
    "debug_video_path", "", "Path to save the planner debug video at."
)

# Unused flags needed to pass for multi-run infrastructure.
flags.DEFINE_bool("autotune", False, "Unused here.")
flags.DEFINE_string("objective", "", "Unused here.")
flags.DEFINE_string("client_handle", "client_0", "Unused.")
flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.")
flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.")


def make_env(env_type, real_env, sim_env_kwargs):
  """Factory function for envs."""
  return {
      "real": lambda: real_env.new_like(  # pylint: disable=g-long-lambda
          batch_size=sim_env_kwargs["batch_size"],
          store_rollouts=False,
      ),
      "simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames(  # pylint: disable=g-long-lambda
          **sim_env_kwargs
      ),
  }[env_type]()


def make_agent(
    agent_type, env, policy_hparams, policy_dir, sampling_temp,
    sim_env_kwargs=None, frame_stack_size=None, planning_horizon=None,
    rollout_agent_type=None, batch_size=None, num_rollouts=None,
    inner_batch_size=None, video_writer=None, env_type=None):
  """Factory function for Agents."""
  if batch_size is None:
    batch_size = env.batch_size
  return {
      "random": lambda: rl_utils.RandomAgent(  # pylint: disable=g-long-lambda
          batch_size, env.observation_space, env.action_space
      ),
      "policy": lambda: rl_utils.PolicyAgent(  # pylint: disable=g-long-lambda
          batch_size, env.observation_space, env.action_space,
          policy_hparams, policy_dir, sampling_temp
      ),
      "planner": lambda: rl_utils.PlannerAgent(  # pylint: disable=g-long-lambda
          batch_size,
          make_agent(
              rollout_agent_type, env, policy_hparams, policy_dir,
              sampling_temp, batch_size=inner_batch_size
          ),
          make_env(env_type, env.env, sim_env_kwargs),
          lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
          num_rollouts, planning_horizon,
          discount_factor=policy_hparams.gae_gamma,
          video_writer=video_writer
      ),
  }[agent_type]()


def make_eval_fn_with_agent(
    agent_type, planner_hparams, model_dir, log_every_steps=None,
    video_writer=None
):
  """Returns an out-of-graph eval_fn using the Agent API."""
  def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
    """Eval function."""
    base_env = env
    env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
    sim_env_kwargs = rl.make_simulated_env_kwargs(
        base_env, loop_hparams, batch_size=planner_hparams.batch_size,
        model_dir=model_dir
    )
    agent = make_agent(
        agent_type, env, policy_hparams, policy_dir, sampling_temp,
        sim_env_kwargs, loop_hparams.frame_stack_size,
        planner_hparams.planning_horizon, planner_hparams.rollout_agent_type,
        num_rollouts=planner_hparams.num_rollouts,
        inner_batch_size=planner_hparams.batch_size,
        video_writer=video_writer, env_type=planner_hparams.env_type
    )
    rl_utils.run_rollouts(
        env, agent, env.reset(), log_every_steps=log_every_steps
    )
    assert len(base_env.current_epoch_rollouts()) == env.batch_size
  return eval_fn


def evaluate(
    loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
    agent_type, eval_with_learner, log_every_steps, debug_video_path,
    report_fn=None, report_metric=None
):
  """Evaluate."""
  if eval_with_learner:
    assert agent_type == "policy"

  if report_fn:
    assert report_metric is not None

  eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
  video_writer = None
  kwargs = {}
  if not eval_with_learner:
    if debug_video_path:
      video_writer = common_video.WholeVideoWriter(
          fps=10, output_path=debug_video_path, file_format="avi")
    kwargs["eval_fn"] = make_eval_fn_with_agent(
        agent_type, planner_hparams, model_dir,
        log_every_steps=log_every_steps,
        video_writer=video_writer
    )
  eval_metrics = rl_utils.evaluate_all_configs(
      loop_hparams, policy_dir, **kwargs
  )
  rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)

  if video_writer is not None:
    video_writer.finish_to_disk()

  # Report metrics
  if report_fn:
    if report_metric == "mean_reward":
      metric_name = rl_utils.get_metric_name(
          sampling_temp=loop_hparams.eval_sampling_temps[0],
          max_num_noops=loop_hparams.eval_max_num_noops,
          clipped=False
      )
      report_fn(eval_metrics[metric_name], 0)
    else:
      report_fn(eval_metrics[report_metric], 0)
  return eval_metrics


def get_game_for_worker(map_name, directory_id):
  """Get game for the given worker (directory) id."""
  if map_name == "v100unfriendly":
    games = ["chopper_command", "boxing", "asterix", "seaquest"]
    worker_per_game = 5
  elif map_name == "human_nice":
    games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
    worker_per_game = 5
  else:
    raise ValueError("Unknown worker to game map name: %s" % map_name)
  games.sort()
  game_id = (directory_id - 1) // worker_per_game
  tf.logging.info("Getting game %d from %s." % (game_id, games))
  return games[game_id]


def main(_):
  now = datetime.datetime.now()
  now_tag = now.strftime("%Y_%m_%d_%H_%M")

  loop_hparams = trainer_lib.create_hparams(
      FLAGS.loop_hparams_set, FLAGS.loop_hparams
  )
  if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1:
    loop_hparams.game = get_game_for_worker(
        FLAGS.worker_to_game_map, FLAGS.worker_id + 1)
    tf.logging.info("Set game to %s." % loop_hparams.game)

  if FLAGS.full_eval:
    loop_hparams.eval_rl_env_max_episode_steps = -1

  planner_hparams = trainer_lib.create_hparams(
      FLAGS.planner_hparams_set, FLAGS.planner_hparams
  )

  policy_dir = FLAGS.policy_dir
  model_dir = FLAGS.model_dir
  eval_metrics_dir = FLAGS.eval_metrics_dir
  if FLAGS.output_dir:
    cur_dir = FLAGS.output_dir
    if FLAGS.total_num_workers > 1:
      cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1))
    policy_dir = os.path.join(cur_dir, "policy")
    model_dir = os.path.join(cur_dir, "world_model")
    eval_metrics_dir = os.path.join(cur_dir, "evaluator_" + now_tag)
    tf.logging.info("Writing metrics to %s." % eval_metrics_dir)
    if not tf.gfile.Exists(eval_metrics_dir):
      tf.gfile.MkDir(eval_metrics_dir)

  evaluate(
      loop_hparams, planner_hparams, policy_dir, model_dir,
      eval_metrics_dir, FLAGS.agent, FLAGS.eval_with_learner,
      FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None,
      debug_video_path=FLAGS.debug_video_path
  )


if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run()
35.528986
112
0.730879
135a363584e253a0fd487ab5902e769a0e03931f
1,236
py
Python
src/part_2_automation/test_test1.py
AndreiHustiuc/IT_Factory_Course
c6f3e4a9282a1c19c0f52c79f0c81f026814a02a
[ "MIT" ]
null
null
null
src/part_2_automation/test_test1.py
AndreiHustiuc/IT_Factory_Course
c6f3e4a9282a1c19c0f52c79f0c81f026814a02a
[ "MIT" ]
null
null
null
src/part_2_automation/test_test1.py
AndreiHustiuc/IT_Factory_Course
c6f3e4a9282a1c19c0f52c79f0c81f026814a02a
[ "MIT" ]
1
2022-03-16T10:39:03.000Z
2022-03-16T10:39:03.000Z
# Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
38.625
89
0.768608
135abe65fc98dab6544eb64993951b1e91db47a2
732
py
Python
app/grandchallenge/components/admin.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
1
2021-02-09T10:30:44.000Z
2021-02-09T10:30:44.000Z
app/grandchallenge/components/admin.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
null
null
null
app/grandchallenge/components/admin.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin

from grandchallenge.components.models import (
    ComponentInterface,
    ComponentInterfaceValue,
)


# NOTE: the original definitions of these two ModelAdmin classes were lost in
# extraction; minimal stand-ins are assumed here so the registrations below run.
class ComponentInterfaceAdmin(admin.ModelAdmin):
    pass


class ComponentInterfaceValueAdmin(admin.ModelAdmin):
    pass


admin.site.register(ComponentInterface, ComponentInterfaceAdmin)
admin.site.register(ComponentInterfaceValue, ComponentInterfaceValueAdmin)
23.612903
74
0.67623
135ad256377f1febc7ff1ea7598a8c696743a758
165
py
Python
publishtimer/__init__.py
paragguruji/publishtimer
b0b68d6c4d450a2cc22d29725e43c2a1261f0f74
[ "BSD-2-Clause" ]
null
null
null
publishtimer/__init__.py
paragguruji/publishtimer
b0b68d6c4d450a2cc22d29725e43c2a1261f0f74
[ "BSD-2-Clause" ]
null
null
null
publishtimer/__init__.py
paragguruji/publishtimer
b0b68d6c4d450a2cc22d29725e43c2a1261f0f74
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 15:28:24 2016

@author: Parag Guruji, [email protected]
"""

from .helpers import setup_env

done = setup_env()
16.5
44
0.678788
135d785a425446491b59ae18c63cdcf06bf42dd8
278
py
Python
netdisco/discoverables/nanoleaf_aurora.py
jjlawren/netdisco
ffc3cd092bff359b1c1fc1ed51940624b3c8076b
[ "Apache-2.0" ]
1
2020-01-05T21:36:10.000Z
2020-01-05T21:36:10.000Z
env/lib/python3.7/site-packages/netdisco/discoverables/nanoleaf_aurora.py
seanmitch/UPnP
f3cb1ef62657f166971c1c470ce5dfd58bdeeac9
[ "Unlicense" ]
null
null
null
env/lib/python3.7/site-packages/netdisco/discoverables/nanoleaf_aurora.py
seanmitch/UPnP
f3cb1ef62657f166971c1c470ce5dfd58bdeeac9
[ "Unlicense" ]
null
null
null
"""Discover Nanoleaf Aurora devices.""" from . import MDNSDiscoverable
27.8
74
0.726619
135f17354c6a575112f9dd1ee2ae823d8e499637
2,299
py
Python
debug/compute_score_common_ts_RETREAT.py
DavidSabbagh/meeg_power_regression
d9cd5e30028ffc24f08a52966c7641f611e92ee6
[ "BSD-3-Clause" ]
1
2020-12-18T06:10:16.000Z
2020-12-18T06:10:16.000Z
debug/compute_score_common_ts_RETREAT.py
DavidSabbagh/meeg_power_regression
d9cd5e30028ffc24f08a52966c7641f611e92ee6
[ "BSD-3-Clause" ]
null
null
null
debug/compute_score_common_ts_RETREAT.py
DavidSabbagh/meeg_power_regression
d9cd5e30028ffc24f08a52966c7641f611e92ee6
[ "BSD-3-Clause" ]
2
2021-03-01T01:36:38.000Z
2021-03-01T13:44:02.000Z
import os.path as op

import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score
import mne
from pyriemann.tangentspace import TangentSpace

import config_drago as cfg

meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)

file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs)  # (sub, fb, ch, ch)

info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)

# NOTE: proj_covs_common and proj_covs_ts are project helpers that were not
# captured in this snippet; they are assumed to be defined elsewhere in the
# repository (see the sketch after this record for one possible shape).
covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)

info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]

ridge = make_pipeline(StandardScaler(),
                      RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
                          scoring="neg_mean_absolute_error",
                          n_jobs=n_jobs, verbose=True)
31.930556
77
0.651588
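The script above calls two project helpers that were not captured in this record. Under the pipeline it describes (a common spatial projection of the covariances followed by a Riemannian tangent-space embedding), one plausible shape is sketched below; both functions, and the assumed (n_subjects, n_channels, n_channels) array layout, are assumptions, not the authors' actual implementations:

import numpy as np
from pyriemann.tangentspace import TangentSpace


def proj_covs_common(covs, picks, scale=1.0, rank=65, reg=1e-6):
    # Assume covs is an (n_subjects, n_channels, n_channels) array here;
    # the real data structure in the script is a list of per-subject dicts.
    covs = scale * covs[:, picks][:, :, picks]
    mean = covs.mean(axis=0)
    _, eigvecs = np.linalg.eigh(mean)
    W = eigvecs[:, -rank:]                       # dominant common subspace
    proj = np.array([W.T @ c @ W for c in covs])
    return proj + reg * np.eye(rank)             # small ridge for invertibility


def proj_covs_ts(covs):
    # Tangent-space embedding at the Riemannian mean of the SPD matrices.
    return TangentSpace().fit_transform(covs)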
135f22f80a61bf2986149f078e69c6a03f73a3a5
1,161
py
Python
bter/publish.py
mengalong/bter
7fa56f9c83429bc564e6d123498b14aae5c390b1
[ "Apache-2.0" ]
1
2017-08-30T01:01:50.000Z
2017-08-30T01:01:50.000Z
bter/publish.py
mengalong/bter
7fa56f9c83429bc564e6d123498b14aae5c390b1
[ "Apache-2.0" ]
null
null
null
bter/publish.py
mengalong/bter
7fa56f9c83429bc564e6d123498b14aae5c390b1
[ "Apache-2.0" ]
null
null
null
# Copyright 2017~ mengalong <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import daiquiri
from six.moves.urllib import parse as urlparse
from stevedore import driver

logger = daiquiri.getLogger(__name__)
33.171429
77
0.709733
135f9b393c2da1b1ae7cb6b4ea51c6e47c87c3b3
2,976
py
Python
web3/_utils/module_testing/math_contract.py
y19818/web3.py
32a85a287ab63220d1e0c06d77be74de595ff02f
[ "MIT" ]
null
null
null
web3/_utils/module_testing/math_contract.py
y19818/web3.py
32a85a287ab63220d1e0c06d77be74de595ff02f
[ "MIT" ]
null
null
null
web3/_utils/module_testing/math_contract.py
y19818/web3.py
32a85a287ab63220d1e0c06d77be74de595ff02f
[ "MIT" ]
null
null
null
MATH_BYTECODE = (
    "606060405261022e806100126000396000f360606040523615610074576000357c01000000000000"
    "000000000000000000000000000000000000000000009004806316216f391461007657806361bc22"
    "1a146100995780637cf5dab0146100bc578063a5f3c23b146100e8578063d09de08a1461011d5780"
    "63dcf537b11461014057610074565b005b610083600480505061016c565b60405180828152602001"
    "91505060405180910390f35b6100a6600480505061017f565b604051808281526020019150506040"
    "5180910390f35b6100d26004808035906020019091905050610188565b6040518082815260200191"
    "505060405180910390f35b61010760048080359060200190919080359060200190919050506101ea"
    "565b6040518082815260200191505060405180910390f35b61012a6004805050610201565b604051"
    "8082815260200191505060405180910390f35b610156600480803590602001909190505061021756"
    "5b6040518082815260200191505060405180910390f35b6000600d9050805080905061017c565b90"
    "565b60006000505481565b6000816000600082828250540192505081905550600060005054905080"
    "507f3496c3ede4ec3ab3686712aa1c238593ea6a42df83f98a5ec7df9834cfa577c5816040518082"
    "815260200191505060405180910390a18090506101e5565b919050565b6000818301905080508090"
    "506101fb565b92915050565b600061020d6001610188565b9050610214565b90565b600060078202"
    "90508050809050610229565b91905056"
)


MATH_ABI = [
    {
        "constant": False,
        "inputs": [],
        "name": "return13",
        "outputs": [
            {"name": "result", "type": "int256"},
        ],
        "type": "function",
    },
    {
        "constant": True,
        "inputs": [],
        "name": "counter",
        "outputs": [
            {"name": "", "type": "uint256"},
        ],
        "type": "function",
    },
    {
        "constant": False,
        "inputs": [
            {"name": "amt", "type": "uint256"},
        ],
        "name": "increment",
        "outputs": [
            {"name": "result", "type": "uint256"},
        ],
        "type": "function",
    },
    {
        "constant": False,
        "inputs": [
            {"name": "a", "type": "int256"},
            {"name": "b", "type": "int256"},
        ],
        "name": "add",
        "outputs": [
            {"name": "result", "type": "int256"},
        ],
        "type": "function",
    },
    {
        "constant": False,
        "inputs": [],
        "name": "increment",
        "outputs": [
            {"name": "", "type": "uint256"},
        ],
        "type": "function"
    },
    {
        "constant": False,
        "inputs": [
            {"name": "a", "type": "int256"},
        ],
        "name": "multiply7",
        "outputs": [
            {"name": "result", "type": "int256"},
        ],
        "type": "function",
    },
    {
        "anonymous": False,
        "inputs": [
            {"indexed": False, "name": "value", "type": "uint256"},
        ],
        "name": "Increased",
        "type": "event",
    },
]
32.347826
87
0.576949
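A sketch of how MATH_ABI and MATH_BYTECODE might be exercised with web3.py against an in-memory test chain. This assumes the optional eth-tester backend is installed, and uses the modern snake_case method names, which may differ from the API of the older web3.py revision this record was taken from:

from web3 import Web3

w3 = Web3(Web3.EthereumTesterProvider())
w3.eth.default_account = w3.eth.accounts[0]

# Deploy the contract, then bind a callable instance at its address.
Math = w3.eth.contract(abi=MATH_ABI, bytecode=MATH_BYTECODE)
tx_hash = Math.constructor().transact()
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
math = w3.eth.contract(address=receipt.contractAddress, abi=MATH_ABI)

assert math.functions.return13().call() == 13
assert math.functions.add(7, 13).call() == 20
assert math.functions.multiply7(3).call() == 21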
135f9d0a7a2f751997f717a3c72579433fa4791e
4,269
py
Python
dnsdb/config.py
nuby/open_dnsdb
7fec703d8458083f0e6826393656055556e9f0b2
[ "Apache-2.0" ]
1
2019-09-27T01:06:55.000Z
2019-09-27T01:06:55.000Z
dnsdb/config.py
cclauss/open_dnsdb
28c2055685be1c173d77eaa2a05d8e156ccbbbf2
[ "Apache-2.0" ]
null
null
null
dnsdb/config.py
cclauss/open_dnsdb
28c2055685be1c173d77eaa2a05d8e156ccbbbf2
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
import os
import sys
from datetime import timedelta

from oslo.config import cfg

CONF = cfg.CONF

CONF.register_opts([
    cfg.StrOpt('log-dir'),
    cfg.StrOpt('log-file'),
    cfg.StrOpt('debug'),
    cfg.StrOpt('verbose'),
], 'log')

CONF.register_opts([
    cfg.StrOpt('connection'),
    cfg.StrOpt('data'),
], 'DB')

CONF.register_opts([
    cfg.StrOpt('server'),
    cfg.StrOpt('port'),
    cfg.StrOpt('from_addr'),
    cfg.StrOpt('info_list'),
    cfg.StrOpt('alert_list'),
], 'MAIL')

CONF.register_opts([
    cfg.StrOpt('allow_ip'),
    cfg.StrOpt('secret_key'),
    cfg.StrOpt('env'),
    cfg.StrOpt('local_group'),
    cfg.StrOpt('acl_dir'),
    cfg.StrOpt('view_acl_group')
], 'etc')

CONF.register_opts([
    cfg.IntOpt('dnsupdater_port'),
], 'api')

CONF.register_opts([
    cfg.StrOpt('acl_groups'),
    cfg.IntOpt('cname_ttl'),
    cfg.StrOpt('view_zone')
], 'view')

CONF.register_opts([
    cfg.StrOpt('base-url',
               default='/',
               help='The url prefix of this site.'),
    cfg.StrOpt('run-mode',
               default="werkzeug",
               choices=('gunicorn', 'werkzeug'),
               help="Run server use the specify mode."),
    cfg.StrOpt('bind',
               default='0.0.0.0',
               help='The IP address to bind'),
    cfg.IntOpt('port',
               default=8080,
               help='The port to listen'),
    cfg.BoolOpt('debug',
                default=False),
], 'web')

CONF.register_opts([
    cfg.StrOpt('config',
               default=None,
               help='The path to a Gunicorn config file.'),
    cfg.StrOpt('bind',
               default='127.0.0.1:8888'),
    cfg.IntOpt('workers',
               default=0,
               help='The number of worker processes for handling requests'),
    cfg.BoolOpt('daemon',
                default=False,
                help='Daemonize the Gunicorn process'),
    cfg.StrOpt('accesslog',
               default=None,
               help='The Access log file to write to.'
                    '"-" means log to stderr.'),
    cfg.StrOpt('loglevel',
               default='info',
               help='The granularity of Error log outputs.',
               choices=('debug', 'info', 'warning', 'error', 'critical')),
    cfg.BoolOpt('ignore-healthcheck-accesslog',
                default=False),
    cfg.IntOpt('timeout',
               default=30,
               help='Workers silent for more than this many seconds are '
                    'killed and restarted.'),
    cfg.StrOpt('worker-class',
               default='sync',
               help='The type of workers to use.',
               choices=('sync', 'eventlet', 'gevent', 'tornado'))
], 'gunicorn')
31.160584
92
0.594284
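Once the options above are registered, oslo.config resolves values from config files and the command line at call time. A minimal sketch of consuming this module (the config-file path and printed options are illustrative):

from dnsdb import config  # the registering module above

# Parse CLI/config sources; dashed option names are exposed with underscores.
config.CONF(['--config-file', '/etc/dnsdb/dnsdb.conf'], project='dnsdb')

print(config.CONF.web.bind, config.CONF.web.port)
print(config.CONF.gunicorn.worker_class)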
136018fcec6b4476406646ec76b683c9bf3950c1
1,148
py
Python
rover/rover.py
cloudy/osr-rover-code
07d370ae1cde75eaf2d279fcc7f220c95cf6d736
[ "Apache-2.0" ]
null
null
null
rover/rover.py
cloudy/osr-rover-code
07d370ae1cde75eaf2d279fcc7f220c95cf6d736
[ "Apache-2.0" ]
null
null
null
rover/rover.py
cloudy/osr-rover-code
07d370ae1cde75eaf2d279fcc7f220c95cf6d736
[ "Apache-2.0" ]
null
null
null
from __future__ import print_function

import time

from rover import Robot
from connections import Connections
19.457627
110
0.674216
136335a62a6f24cad26390348c87d9d3bbbba896
14,534
py
Python
aswiki/parser.py
scanner/django-aswiki
318908eeccc8da324846ac5ffc4d4a206f560521
[ "BSD-3-Clause" ]
null
null
null
aswiki/parser.py
scanner/django-aswiki
318908eeccc8da324846ac5ffc4d4a206f560521
[ "BSD-3-Clause" ]
1
2020-09-25T05:40:38.000Z
2020-09-28T05:41:27.000Z
aswiki/parser.py
scanner/django-aswiki
318908eeccc8da324846ac5ffc4d4a206f560521
[ "BSD-3-Clause" ]
null
null
null
#
# File: $Id: parser.py 1865 2008-10-28 00:47:27Z scanner $
#
"""
This is where the logic and definition of our wiki markup parser lives.

We use the Python Creoleparser (which requires Genshi)

We make a custom dialect so that the parser can know the URL base for all
of the topics (pages) in the wiki and some additional goop so that we can
tell what other topics a given topic refers to.
"""

# system imports
#
from urllib import quote
from urlparse import urlparse

try:
    import threading
except ImportError:
    import dummy_threading as threading

# Django imports
#
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _

# 3rd party imports
#
from creoleparser.dialects import create_dialect, creole10_base, creole11_base
from creoleparser.core import Parser
from genshi import builder

# We see if we have the 'typogrify' app installed. If we do we will
# use it for rendering our templates to prettify them a bit.
#
try:
    from typogrify.templatetags.typogrify import typogrify
except ImportError:
    # NOTE: the original fallback body was lost in extraction; a no-op
    # placeholder is assumed here so the module still imports.
    typogrify = None

# Model imports
#
from aswiki.models import Topic


############################################################################
############################################################################
#
############################################################################
#
def class_fn(topic_name):
    """
    This function is invoked by the markup dialect every time it
    encounters a wiki topic. It returns a string that is the css class
    name to add to wiki links as they are turned in to proper <a
    href></a> links.

    We use this as a way to annotate topics that do not exist yet with
    some graphical attribute so that users can easily tell which topics
    are not yet created.

    We use the wiki.models.TopicManager's css_class_name method to do this
    lookup.

    NOTE: Since this module is imported by the wiki.models module we need
    to import that module inside here so that we can access the Topic
    model. This is cheap since it will already be imported.

    Arguments:
    - `topic_name`: the topic name being checked for existence.
    """
    # XXX This is where we should do a cache lookup of the topic name
    #     and only if that fails fall back to
    #     Topic.objects.css_class_name(topic_name)
    #
    return Topic.objects.css_class_name(topic_name)


####################################################################
#
def output_mailto(arg_string):
    """
    Given the arguments of an anchor macro output the proper genshi
    stream that will render a mailto link.

    We also need to support the magic argument string format of
    '<you> AT <word> AT <foo> DOT <foo>'

    Arguments:
    - `arg_string`: The argument string of the anchor macro.
    - `macro_body`: The macro body if provided
    - `block_type`: True if this is a block macro.
    """
    # XXX Need to support the fancy format.. but for now just get the basic
    #     working.
    return builder.tag.a(arg_string, href="mailto:%s" % arg_string)


####################################################################
#
def output_subtopics(arg_string):
    """
    This will take a single string as its input. It will find all topics
    for which the string as a topic name is the parent topic.

    There is some semantic magic in a topic if it contains periods, ie:
    the '.' character. This forms a kind of hierarchy. Loosely speaking
    all topics that start with the same prefix, separated by '.' are
    sub-topics. So: 2007.Agenda is a sub-topic of 2007. 2007.Agenda.foo
    is a subtopic of 2007 and 2007.Agenda.

    This macro will insert in to the output <ul> of the topics that are
    proper subtopics of the given string, ordered by name.

    So in the above example if I were to say <<subtopics 2007>> it would
    give me "2007.Agenda" and "2007.Agenda.foo" in a <ul>

    If the arg string ends with a dot, then it is treated as the
    separator. ie: <<subtopics 2007.>> and <<subtopics 2007>> are
    identical.

    Arguments:
    - `arg_string`: The topic we want to find all subtopics of.
    """
    arg_string = arg_string
    if arg_string[-1] != '.':
        arg_string = arg_string + "."
    topics = Topic.objects.filter(
        lc_name__istartswith=arg_string.lower()).order_by('lc_name')
    if topics.count() == 0:
        return None

    ul = builder.tag.ul()

    # For every topic that matches our pattern we insert a 'li' link
    # to that topic in our output. We also add this topic to the
    # 'extra_references' list in our global TOPIC_LIST object. This is
    # so that the prerender../save() methods of the Topic object we are
    # rendering this output for can know to add those topics to the list
    # of topics referenced by the topic being rendered.
    #
    for topic in topics:
        TOPIC_LIST.extra_references.append(topic)
        ul.append(builder.tag.li(builder.tag.a(
            topic.name, href=topic.get_absolute_url())))
    return ul


####################################################################
#
def output_attachments(arg_string):
    """
    Returns a <ul> of all of the attachments attached to the topic name
    given as the arg_string.

    Arguments:
    - `arg_string`: Expected to be the name of a topic. If no such topic
      exist, then no attachment list is generated.
    """
    try:
        topic = Topic.objects.get(lc_name=arg_string.lower())
    except Topic.DoesNotExist:
        return None

    ul = builder.tag.ul()

    # For every file attachment on this topic, add a 'li' link
    # to that attachment.
    #
    for attachment in topic.file_attachments.all():
        ul.append(builder.tag.li(builder.tag.a(
            attachment.basename(), href=attachment.get_absolute_url())))
    return ul


####################################################################
#
def macro_fn(name, arg_string, macro_body, block_type, environ):
    """
    Handles the macros we define for our version of markup.

    Arguments:
    - `name`: The name of the macro
    - `arg_string`: The argument string, including any delimiters
    - `macro_body`: The macro body, None for macro with no body.
    - `block_type`: True for block type macros.
    - `environ` : The environment object, passed through from
      creoleparser.core.Parser class's 'parse()' method.
    """
    name = name.strip().lower()
    arg_string = arg_string.strip()
    if name == 'anchor':
        if block_type:
            return builder.tag.a(macro_body, name=arg_string)
        else:
            return builder.tag.a(name=arg_string)
    elif name == 'mailto':
        return output_mailto(arg_string)
    elif name == 'gettext':
        if block_type:
            return _(macro_body)
        else:
            return _(arg_string)
    elif name == 'subtopics':
        return output_subtopics(arg_string)
    elif name == 'attachlist':
        return output_attachments(arg_string)
    elif name == 'attachment':
        # For including downloadable attachments in a wiki document.
        if block_type:
            return builder.tag.a(macro_body, href=arg_string)
        else:
            return builder.tag.a(arg_string, href=arg_string)
    return None


##
## Create our custom dialect. It will use our class function and a TopicList
## instance. The root URL for all wiki topics will be the same as the
## 'aswiki_topic_index' url.
##
## NOTE: This assumes that the url for a specific Topic is the same as the url
##       for the aswiki_topic_index with the Topic name appended to it
##
# NOTE: the TopicList class definition itself was lost in extraction; it is
# assumed to be defined earlier in the original module.
TOPIC_LIST = TopicList()

# dialect = creoleparser.dialects.Creole10(
#     wiki_links_base_url = reverse('aswiki_topic_index'),
#     wiki_links_space_char = '%20',
#     use_additions = True,
#     no_wiki_monospace = False,
#     wiki_links_class_func = class_fn,
#     wiki_links_path_func = TOPIC_LIST.path_fn,
#     macro_func = macro_fn,
#     interwiki_links_base_urls=dict(wikicreole='http://wikicreole.org/wiki/',
#                                    wikipedia='http://wikipedia.org/wiki/',)
#     )

parser = Parser(dialect=create_dialect(
    creole11_base,
    wiki_links_base_url=reverse('aswiki_topic_index'),  # NOTE: Make this a
                                                        # two element list for
                                                        # images to be loaded
                                                        # from a separate URL
    wiki_links_space_char='%20',  # NOTE: make this a two element list to
                                  # give images a different space character.
    no_wiki_monospace=False,
    wiki_links_class_func=class_fn,
    wiki_links_path_func=(TOPIC_LIST.path_fn, TOPIC_LIST.image_fn),
    bodied_macros={},
    non_bodied_macros={},
    macro_func=macro_fn,
    # custom_markup = (),
    interwiki_links_base_urls={
        'wikicreole': 'http://wikicreole.org/wiki/',
        'wikipedia': 'http://wikipedia.org/wiki/'
    }))
37.848958
96
0.597083
1365f047bda189ac06139ef3c589483027732b74
13,825
py
Python
oauth_api/validators.py
anobi/django-oauth-api
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
[ "BSD-2-Clause" ]
null
null
null
oauth_api/validators.py
anobi/django-oauth-api
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
[ "BSD-2-Clause" ]
null
null
null
oauth_api/validators.py
anobi/django-oauth-api
95bf9b500dab326553a5a8a17d5c6da1a34f6ac4
[ "BSD-2-Clause" ]
4
2015-07-30T11:03:54.000Z
2017-11-13T15:30:48.000Z
import base64
import binascii

from datetime import timedelta

from django.contrib.auth import authenticate
from django.utils import timezone

from oauthlib.oauth2 import RequestValidator

from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication
from oauth_api.settings import oauth_api_settings

GRANT_TYPE_MAPPING = {
    'authorization_code': (AbstractApplication.GRANT_AUTHORIZATION_CODE,),
    'password': (AbstractApplication.GRANT_PASSWORD,),
    'client_credentials': (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
    'refresh_token': (AbstractApplication.GRANT_AUTHORIZATION_CODE,
                      AbstractApplication.GRANT_PASSWORD,
                      AbstractApplication.GRANT_CLIENT_CREDENTIALS)
}
37.364865
124
0.636166
13662d9d6240c63c9a734dfc856cbc7f4107d5e2
160
py
Python
objects/fun_return.py
padmacho/pythontutorial
80c58d2d6efc0c3598f92b627338c6cd9fda1759
[ "Apache-2.0" ]
null
null
null
objects/fun_return.py
padmacho/pythontutorial
80c58d2d6efc0c3598f92b627338c6cd9fda1759
[ "Apache-2.0" ]
null
null
null
objects/fun_return.py
padmacho/pythontutorial
80c58d2d6efc0c3598f92b627338c6cd9fda1759
[ "Apache-2.0" ]
null
null
null
# NOTE: the definition of modify() was lost in extraction; a minimal stand-in
# that mutates and returns its argument is assumed here so the identity check
# below is meaningful.
def modify(lst):
    lst.append(4)
    return lst


x = [1, 2, 3]
y = modify(x)
print("x == y", x == y)
print("x is y", x is y)
26.666667
65
0.55625
1366f50a70db89f7b6f66ff4d8a7cc0516afcf2f
6,471
py
Python
edx_gen/_write_comps.py
hberndl70/mooc-generator
58ff77ece12b456887ec24db79d8baa87ecd5621
[ "MIT" ]
null
null
null
edx_gen/_write_comps.py
hberndl70/mooc-generator
58ff77ece12b456887ec24db79d8baa87ecd5621
[ "MIT" ]
null
null
null
edx_gen/_write_comps.py
hberndl70/mooc-generator
58ff77ece12b456887ec24db79d8baa87ecd5621
[ "MIT" ]
null
null
null
import sys, os
import tarfile
import shutil

from edx_gen import _edx_consts
from edx_gen import _read_metadata
from edx_gen import _write_structure
from edx_gen import _write_comps
from edx_gen import _write_comp_html
from edx_gen import _write_comp_checkboxes
from edx_gen import _write_comp_video
from edx_gen import _xml_google_doc
from edx_gen import _markdown
from edx_gen import _util

import __SETTINGS__

#--------------------------------------------------------------------------------------------------
# Text strings

WARNING = " WARNING:"

#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
#--------------------------------------------------------------------------------------------------

# write to either units folder or problems folder, depending on the type
#--------------------------------------------------------------------------------------------------
38.064706
117
0.641323
1367201e3118f25640a5bbd95836976d130709a4
2,403
py
Python
grading_program.py
ByeonghoonJeon/Student-Grading
eee55638aee4390d7758c1204b85cce7279ccdf7
[ "MIT" ]
null
null
null
grading_program.py
ByeonghoonJeon/Student-Grading
eee55638aee4390d7758c1204b85cce7279ccdf7
[ "MIT" ]
null
null
null
grading_program.py
ByeonghoonJeon/Student-Grading
eee55638aee4390d7758c1204b85cce7279ccdf7
[ "MIT" ]
null
null
null
# 1. Create students score dictionary.
students_score = {}


# 2. Input student's name and check if input is correct. (Alphabet, period, and blank only.)
# 2.1 Create a function that evaluates the validity of a name.
# NOTE: the original check_name definition was lost in extraction; a minimal
# stand-in matching the comments (alphabet, period, and blank only) is assumed.
def check_name(name):
    return len(name) > 0 and all(ch.isalpha() or ch in ". " for ch in name)


while True:
    # 2.2 Input student's name.
    name = input("Please input student's name. \n")
    check_name(name)
    # 2.3 Check if the name is alphabet. If not, ask to input correct name again.
    while check_name(name) != True:
        name = input("Please input student's name. (Alphabet and period only.)\n")
    # 3. Input student's score and check if input is correct. (digits only and between zero and 100)
    score = input(f"Please input {name}'s score.(0 ~ 100)\n")
    while score.isdigit() == False or int(score) not in range(0, 101):
        score = input("Please input valid numbers only.(Number from zero to 100.)\n")
    students_score[name] = score
    # 4. Ask another student's information.
    another_student = input(
        "Do you want to input another student's information as well? (Y/N)\n"
    )
    while another_student.lower() not in ("yes", "y", "n", "no"):
        # 4.1 Check if the input is valid.
        another_student = input("Please input Y/N only.\n")
    if another_student.lower() in ("yes", "y"):
        continue
    elif another_student.lower() in ("no", "n"):
        break

for student in students_score:
    score = students_score[student]
    score = int(score)
    if score >= 90:
        students_score[student] = "A"
    elif score in range(70, 90):
        students_score[student] = "B"
    elif score in range(50, 70):
        students_score[student] = "C"
    elif score in range(40, 50):
        students_score[student] = "D"
    else:
        students_score[student] = "F"

print(students_score)
36.969231
100
0.646275
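The final grade-assignment loop above chains range() checks; the same boundary mapping can also be expressed with bisect, a sketch (not part of the original script):

from bisect import bisect

def letter_grade(score, bounds=(40, 50, 70, 90), grades="FDCBA"):
    # bisect returns how many boundaries the score meets or exceeds,
    # which indexes straight into the grade string.
    return grades[bisect(bounds, score)]

assert letter_grade(95) == "A" and letter_grade(45) == "D" and letter_grade(39) == "F"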
136832386a64df8b748feecd13c99a4dc30fc9d5
735
py
Python
test/test_utils.py
by46/recipe
203abd2141a536b66b4e57d073169a49395be1f0
[ "MIT" ]
null
null
null
test/test_utils.py
by46/recipe
203abd2141a536b66b4e57d073169a49395be1f0
[ "MIT" ]
null
null
null
test/test_utils.py
by46/recipe
203abd2141a536b66b4e57d073169a49395be1f0
[ "MIT" ]
null
null
null
import unittest

from recipe import utils
33.409091
77
0.719728
1368b69e2269d6b7303299c5097db81eca903217
6,708
py
Python
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py
wangxihao/rgbd-kinect-pose
03180723c99759ba2500bcd42b5fe7a1d26eb507
[ "MIT" ]
1
2022-02-07T06:12:26.000Z
2022-02-07T06:12:26.000Z
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py
wangxihao/rgbd-kinect-pose
03180723c99759ba2500bcd42b5fe7a1d26eb507
[ "MIT" ]
null
null
null
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py
wangxihao/rgbd-kinect-pose
03180723c99759ba2500bcd42b5fe7a1d26eb507
[ "MIT" ]
null
null
null
import numpy as np
import cv2
import os.path as osp
import json

from human_body_prior.tools.model_loader import load_vposer
import torch

vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'


def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors
        Parameters
        ----------
        rot_vecs: torch.tensor Nx3
            array of N axis-angle vectors
        Returns
        -------
        R: torch.tensor Nx3x3
            The rotation matrices for the given axis-angle parameters
    '''

    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device

    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle

    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)

    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)

    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))

    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
35.120419
118
0.570662
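A quick sanity check of batch_rodrigues on random axis-angle vectors; the outputs should be valid rotation matrices (orthonormal, determinant 1):

import torch

rot_vecs = torch.randn(8, 3)            # batch of 8 axis-angle vectors
R = batch_rodrigues(rot_vecs)           # -> (8, 3, 3)

eye = torch.eye(3).expand(8, 3, 3)
assert torch.allclose(R @ R.transpose(1, 2), eye, atol=1e-5)   # orthonormal
assert torch.allclose(torch.det(R), torch.ones(8), atol=1e-5)  # proper rotation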
1368b793b823b3bd0b461ed385d6e6b6434e1e68
3,455
py
Python
scripts/dev/dockerutil.py
axelbarjon/mongodb-kubernetes-operator
13eb844c55774ce8a6de51edde1a66b4371f3ef6
[ "RSA-MD" ]
1
2021-03-24T17:54:51.000Z
2021-03-24T17:54:51.000Z
scripts/dev/dockerutil.py
axelbarjon/mongodb-kubernetes-operator
13eb844c55774ce8a6de51edde1a66b4371f3ef6
[ "RSA-MD" ]
18
2021-03-08T13:38:37.000Z
2022-02-14T15:06:28.000Z
scripts/dev/dockerutil.py
axelbarjon/mongodb-kubernetes-operator
13eb844c55774ce8a6de51edde1a66b4371f3ef6
[ "RSA-MD" ]
1
2021-03-25T13:37:02.000Z
2021-03-25T13:37:02.000Z
import docker
from dockerfile_generator import render
import os
import json
from tqdm import tqdm
from typing import Union, Any, Optional


def build_image(repo_url: str, tag: str, path: str) -> None:
    """
    build_image builds the image with the given tag
    """
    client = docker.from_env()
    print(f"Building image: {tag}")
    client.images.build(tag=tag, path=path)
    print("Successfully built image!")


def push_image(tag: str) -> None:
    """
    push_image pushes the given tag. It uses the current docker environment
    """
    client = docker.from_env()
    print(f"Pushing image: {tag}")
    with tqdm(total=100, ascii=False) as progress_bar:
        last_percent = 0.0
        for line in client.images.push(tag, stream=True):
            # NOTE: get_completion_percentage is a project helper that was not
            # captured in this record (one possible shape is sketched after it).
            percent = get_completion_percentage(line)
            if percent:
                progress_bar.update(percent - last_percent)
                last_percent = percent


def build_and_push_image(repo_url: str, tag: str, path: str, image_type: str) -> None:
    """
    build_and_push_operator creates the Dockerfile for the operator
    and pushes it to the target repo
    """
    dockerfile_text = render(image_type, ["."])
    with open(f"{path}/Dockerfile", "w") as f:
        f.write(dockerfile_text)

    build_image(repo_url, tag, path)
    os.remove(f"{path}/Dockerfile")
    push_image(tag)
32.28972
108
0.636758
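push_image above relies on a get_completion_percentage helper that this record does not include. One plausible shape, parsing the JSON progress lines the Docker SDK streams during a push, is sketched below (an assumption, not the project's actual code):

import json

def get_completion_percentage(line):
    # Each streamed line is a JSON document; pushes report byte progress in
    # progressDetail as {"current": ..., "total": ...}.
    try:
        status = json.loads(line)
    except (ValueError, TypeError):
        return None
    detail = status.get("progressDetail") or {}
    if detail.get("total"):
        return 100.0 * detail.get("current", 0) / detail["total"]
    return None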
136925370fda5dbcb2e2d6d5e61c676370502bb7
904
py
Python
scripts/VCF/UTILS/select_variants.py
elowy01/igsr_analysis
ffea4885227c2299f886a4f41e70b6e1f6bb43da
[ "Apache-2.0" ]
3
2018-04-20T15:04:34.000Z
2022-03-30T06:36:02.000Z
scripts/VCF/UTILS/select_variants.py
elowy01/igsr_analysis
ffea4885227c2299f886a4f41e70b6e1f6bb43da
[ "Apache-2.0" ]
7
2019-06-06T09:22:20.000Z
2021-11-23T17:41:52.000Z
scripts/VCF/UTILS/select_variants.py
elowy01/igsr_analysis
ffea4885227c2299f886a4f41e70b6e1f6bb43da
[ "Apache-2.0" ]
5
2017-11-02T11:17:35.000Z
2021-12-11T19:34:09.000Z
from VcfFilter import VcfFilter

import argparse
import os

#get command line arguments

parser = argparse.ArgumentParser(description='Script to select a certain variant type from a VCF file')

#parameters
parser.add_argument('--bcftools_folder', type=str, required=True, help='Folder containing the Bcftools binary' )
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--type', type=str, required=False, help='Type of variant to select. i.e. snps/indels etc' )

args = parser.parse_args()

if __name__ == '__main__':
    vcf_f = VcfFilter(vcf=args.filename, bcftools_folder=args.bcftools_folder)
    vcf_f.filter_by_variant_type(type=args.type)
39.304348
275
0.766593
13695de67e652f576d889f205ef664189b73d45b
14,680
py
Python
site/tests/unittests/test/test_base64.py
martinphellwig/brython_wf
e169afc1e048cba0c12118b4cd6f109df6fe67c9
[ "BSD-3-Clause" ]
652
2015-07-26T00:00:17.000Z
2022-02-24T18:30:04.000Z
site/tests/unittests/test/test_base64.py
martinphellwig/brython_wf
e169afc1e048cba0c12118b4cd6f109df6fe67c9
[ "BSD-3-Clause" ]
8
2015-09-07T03:38:19.000Z
2021-05-23T03:18:51.000Z
check-python33-manual/samples/standard_library_337/Lib/test/test_base64.py
DaveKaretnyk/parsing-utils2
40085bbd399fa605f2f2a4708d385a64ffc907de
[ "MIT" ]
40
2015-07-24T19:45:08.000Z
2021-11-01T14:54:56.000Z
import unittest
from test import support
import base64
import binascii
import os
import sys
import subprocess


def test_main():
    support.run_unittest(__name__)

if __name__ == '__main__':
    test_main()
41.586402
83
0.570981
1369857b721c52701d49ebb99393f03d4c246712
569
py
Python
appliance_catalog/migrations/0015_appliance_icon_py3.py
ChameleonCloud/portal
92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee
[ "Apache-2.0" ]
3
2015-08-04T20:53:41.000Z
2020-02-14T22:58:20.000Z
appliance_catalog/migrations/0015_appliance_icon_py3.py
ChameleonCloud/portal
92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee
[ "Apache-2.0" ]
103
2015-01-15T14:21:00.000Z
2022-03-31T19:14:20.000Z
appliance_catalog/migrations/0015_appliance_icon_py3.py
ChameleonCloud/portal
92a06fb926dc36e997b94fb8dcd22b7e0d24d3ee
[ "Apache-2.0" ]
4
2016-02-22T16:48:20.000Z
2021-01-08T17:13:21.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-25 20:32
from __future__ import unicode_literals

from django.db import migrations, models
24.73913
86
0.644991
136a5b3d51a58e910193de0d1a2f38a488d4801a
1,435
py
Python
twitoff/twitter.py
ChristopherKchilton/twitoff-ChristopherKchilton
fbac9899feff256ededab009b28e2f6ebd67f476
[ "MIT" ]
1
2021-09-23T22:04:09.000Z
2021-09-23T22:04:09.000Z
twitoff/twitter.py
ChristopherKchilton/twitoff-ChristopherKchilton
fbac9899feff256ededab009b28e2f6ebd67f476
[ "MIT" ]
null
null
null
twitoff/twitter.py
ChristopherKchilton/twitoff-ChristopherKchilton
fbac9899feff256ededab009b28e2f6ebd67f476
[ "MIT" ]
null
null
null
"""Retrieve and request tweets from the DS API""" import requests import spacy from .models import DB, Tweet, User nlp = spacy.load("my_model") # Add and updates tweets def add_or_update_user(username): """Adds and updates the user with twiter handle 'username' to our database """ #TODO: Figure out try: r = requests.get( f"https://lambda-ds-twit-assist.herokuapp.com/user/{username}") user = r.json() user_id = user["twitter_handle"]["id"] # print(user) # This is either respectively grabs or creates a user for our db db_user = (User.query.get(user_id)) or User(id=user_id, name=username) # This adds the db_user to our database DB.session.add(db_user) tweets = user["tweets"] # if tweets: # db_user.newest_tweet_id = tweets[0].id for tweet in tweets: tweet_vector = vectorize_tweet(tweet["full_text"]) tweet_id = tweet["id"] db_tweet = (Tweet.query.get(tweet_id)) or Tweet( id=tweet["id"], text=tweet["full_text"], vect=tweet_vector) db_user.tweets.append(db_tweet) DB.session.add(db_tweet) except Exception as e: print("Error processing {}: {}".format(username, e)) raise e else: DB.session.commit()
25.175439
78
0.608362
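The loop above calls vectorize_tweet, which is not captured in this record. Given the spaCy model loaded at the top of the file, a plausible reconstruction is simply the document vector (an assumption, not the project's actual code):

def vectorize_tweet(tweet_text):
    # spaCy's Doc.vector averages the token vectors of the loaded model,
    # giving a fixed-size embedding to store alongside the tweet.
    return nlp(tweet_text).vector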
136a6f790e3bad7ef05103763b29d85ab9987e87
595
py
Python
day22.py
p88h/aoc2017
a929a8c0894559b0d7dd3d0b58c076295087f4c8
[ "Unlicense" ]
1
2021-12-26T21:28:47.000Z
2021-12-26T21:28:47.000Z
day22.py
p88h/aoc2017
a929a8c0894559b0d7dd3d0b58c076295087f4c8
[ "Unlicense" ]
null
null
null
day22.py
p88h/aoc2017
a929a8c0894559b0d7dd3d0b58c076295087f4c8
[ "Unlicense" ]
null
null
null
import io

grid = {}
y = 0
x = 0
for l in io.open("day22.in").read().splitlines():
    for x in range(len(l)):
        grid[(y, x)] = l[x]
    y += 1
y = y // 2
x = x // 2
dx = 0
dy = -1
r = 0
for iter in range(10000000):
    if (y, x) not in grid or grid[(y, x)] == '.':
        (dy, dx) = (-dx, dy)
        grid[(y, x)] = 'W'
    elif grid[(y, x)] == 'W':
        grid[(y, x)] = '#'
        r += 1
    elif grid[(y, x)] == '#':
        (dy, dx) = (dx, -dy)
        grid[(y, x)] = 'F'
    elif grid[(y, x)] == 'F':
        (dy, dx) = (-dy, -dx)
        grid[(y, x)] = '.'
    y += dy
    x += dx
print(r)
19.833333
49
0.381513
136b2299eb41b7ded97c6048734842406f59258d
3,437
py
Python
ansys/dpf/core/errors.py
TheGoldfish01/pydpf-core
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
[ "MIT" ]
11
2021-01-31T15:50:02.000Z
2021-10-01T23:15:38.000Z
ansys/dpf/core/errors.py
TheGoldfish01/pydpf-core
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
[ "MIT" ]
46
2021-01-14T05:00:50.000Z
2021-10-06T18:30:37.000Z
ansys/dpf/core/errors.py
TheGoldfish01/pydpf-core
75ca8a180454f94cedafbc68c1d6f20dcfc4c795
[ "MIT" ]
3
2021-06-30T07:18:30.000Z
2021-09-15T08:43:11.000Z
from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps

_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""

_FIELD_CONTAINER_PLOTTING_MSG = """"
This fields_container contains multiple fields.  Only one time-step
result can be plotted at a time. Extract a field with
``fields_container[index]``.
"""


def protect_grpc(func):
    """Capture gRPC exceptions and return a more succinct error message."""
    # NOTE: the original wrapper body was lost in extraction; a minimal
    # reconstruction is assumed here, re-raising gRPC channel errors with
    # only their detail string.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
            raise RuntimeError(error.details()) from None

    return wrapper
28.882353
78
0.672971
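With the wrapper reconstructed above, protect_grpc is used as a plain decorator around calls that may hit a dead gRPC channel; a short sketch (the service stub and method are illustrative):

@protect_grpc
def fetch_field(stub, request):
    # Any _InactiveRpcError raised inside is re-raised as a RuntimeError
    # carrying only the server's detail string.
    return stub.GetField(request)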
136b533fe267643a58604244ebaf15ea2c8117bd
11
py
Python
ls10.py
yu961549745/pynote
5976aeeca6368c0956baddf6a9ccb93ae8e0612a
[ "MIT" ]
null
null
null
ls10.py
yu961549745/pynote
5976aeeca6368c0956baddf6a9ccb93ae8e0612a
[ "MIT" ]
null
null
null
ls10.py
yu961549745/pynote
5976aeeca6368c0956baddf6a9ccb93ae8e0612a
[ "MIT" ]
null
null
null
''' IO '''
2.75
3
0.181818
136b5475110947f42e139d24c848b375d4d0e140
2,144
py
Python
deep_disfluency/feature_extraction/wer_calculation_from_final_asr_results.py
askender/deep_disfluency
bea8403ed954df8eadd3e2b9d98bb7c2b416a665
[ "MIT" ]
null
null
null
deep_disfluency/feature_extraction/wer_calculation_from_final_asr_results.py
askender/deep_disfluency
bea8403ed954df8eadd3e2b9d98bb7c2b416a665
[ "MIT" ]
null
null
null
deep_disfluency/feature_extraction/wer_calculation_from_final_asr_results.py
askender/deep_disfluency
bea8403ed954df8eadd3e2b9d98bb7c2b416a665
[ "MIT" ]
null
null
null
from mumodo.mumodoIO import open_intervalframe_from_textgrid
import numpy

from deep_disfluency.utils.accuracy import wer

final_file = open('wer_test.text', "w")

ranges1 = [line.strip() for line in open(
    "/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASR_ranges.text")]
ranges2 = [line.strip() for line in open(
    "/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASR_ranges.text")]

for ranges in [ranges1, ranges2]:
    final_file.write("\n\n")
    for r in ranges:
        for s in ["A", "B"]:
            iframe = open_intervalframe_from_textgrid("{0}{1}.TextGrid"
                                                      .format(r, s))
            hyp = " ".join(iframe['Hyp']['text'])
            ref = " ".join(iframe['Ref']['text'])
            # NOTE: the original rebound the name 'wer' to its own result
            # ('wer = wer(ref, hyp)'), which breaks the second call below;
            # distinct names are used here instead. Python 2 print
            # statements were also converted to print() calls.
            error_rate = wer(ref, hyp)
            cost = wer(ref, hyp, macro=True)
            print(r, s, error_rate)
            print(r, s, error_rate, cost, file=final_file)
final_file.close()

# Based on the results, output the 'good' ASR results
results = open("wer_test.text")
no_ho = 0
no_test = 0
ingood = True
file = open("../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASRgood_ranges.text", "w")
for l in results:
    # print(l)
    if l == "\n":
        print(no_ho)
        no_ho = 0
        file.close()
        file = open(
            "../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASRgood_ranges.text", "w")
        continue
    if float(l.strip('\n').split(" ")[
            2]) < 0.4:  # both speakers are under 40% error rate- likely half decent separation
        # print(l)
        if ingood and "B" in l.strip("\n").split(" ")[1]:
            no_ho += 1
        #file.write(l.strip('\n').split(" ")[0]+l.strip('\n').split(" ")[1]+"\n")
        file.write(l.strip('\n').split(" ")[0] + "\n")
        ingood = True
    else:
        ingood = False
print(no_ho)
results.close()
file.close()
36.965517
158
0.619403
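The wer imported from deep_disfluency.utils.accuracy is not shown in this record. For reference, a minimal word error rate via word-level edit distance looks like this (a textbook sketch, not the project's implementation; it ignores the macro= option used above):

def word_error_rate(ref, hyp):
    r, h = ref.split(), hyp.split()
    # Levenshtein distance over words via dynamic programming.
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            sub = d[i - 1][j - 1] + (r[i - 1] != h[j - 1])
            d[i][j] = min(sub, d[i - 1][j] + 1, d[i][j - 1] + 1)
    return d[len(r)][len(h)] / float(len(r))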
136b8fe5a8aa80e827bc10eb9537bdf94b4fdc81
1,541
py
Python
newsweec/utils/_dataclasses.py
Adwaith-Rajesh/newsweec
f3b66fb6f74cb68be4e716269032db340abe8320
[ "MIT" ]
13
2020-08-30T10:52:29.000Z
2021-08-18T12:20:39.000Z
newsweec/utils/_dataclasses.py
Adwaith-Rajesh/newsweec
f3b66fb6f74cb68be4e716269032db340abe8320
[ "MIT" ]
null
null
null
newsweec/utils/_dataclasses.py
Adwaith-Rajesh/newsweec
f3b66fb6f74cb68be4e716269032db340abe8320
[ "MIT" ]
1
2021-06-07T04:01:37.000Z
2021-06-07T04:01:37.000Z
from dataclasses import dataclass
from dataclasses import field
from time import time
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
22.661765
81
0.668397
136bbda00809274a9f8b16997fd9b06b349771f8
3,754
py
Python
vivo2notld/definitions/person_definition.py
gwu-libraries/vivo2notld
3f579f8aad28c60119864757e1fe66c2d64a0149
[ "MIT" ]
5
2015-09-23T10:05:29.000Z
2016-04-07T17:08:38.000Z
vivo2notld/definitions/person_definition.py
gwu-libraries/vivo2notld
3f579f8aad28c60119864757e1fe66c2d64a0149
[ "MIT" ]
null
null
null
vivo2notld/definitions/person_definition.py
gwu-libraries/vivo2notld
3f579f8aad28c60119864757e1fe66c2d64a0149
[ "MIT" ]
null
null
null
from .document_summary import definition as document_summary_definition
from .organization_summary import definition as organization_summmary_definition

definition = {
    "where": "?subj a foaf:Person .",
    "fields": {
        "name": {
            "where": "?subj rdfs:label ?obj ."
        },
        #Contact info
        "email": {
            "where": """
                ?subj obo:ARG_2000028 ?vc .
                ?vc a vcard:Kind .
                ?vc vcard:hasEmail ?vce .
                ?vce a vcard:Email, vcard:Work .
                ?vce vcard:email ?obj .
            """
        },
        "telephone": {
            "where": """
                ?subj obo:ARG_2000028 ?vc .
                ?vc a vcard:Kind .
                ?vc vcard:hasTelephone ?vct .
                ?vct a vcard:Telephone .
                ?vct vcard:telephone ?obj .
            """
        },
        "address": {
            "where": """
                ?subj obo:ARG_2000028 ?vc .
                ?vc a vcard:Kind .
                ?vc vcard:hasAddress ?obj .
            """,
            "definition": {
                "where": "?subj a vcard:Address .",
                "fields": {
                    "address": {
                        "where": "?subj vcard:streetAddress ?obj ."
                    },
                    "city": {
                        "where": "?subj vcard:locality ?obj ."
                    },
                    "state": {
                        "where": "?subj vcard:region ?obj ."
                    },
                    "zip": {
                        "where": "?subj vcard:postalCode ?obj ."
                    }
                }
            }
        },
        "website": {
            "list": True,
            "where": """
                ?subj obo:ARG_2000028 ?vc .
                ?vc a vcard:Kind .
                ?vc vcard:hasURL ?vcu .
                ?vcu a vcard:URL .
                ?vcu vcard:url ?obj .
            """,
            "optional": True
        },
        "researchArea": {
            "where": """
                ?subj vivo:hasResearchArea ?ra .
                ?ra rdfs:label ?obj .
            """,
            "optional": True,
            "list": True
        },
        "geographicFocus": {
            "where": """
                ?subj vivo:geographicFocus ?gf .
                ?gf rdfs:label ?obj .
            """,
            "optional": True,
            "list": True
        },
        "overview": {
            "where": "?subj vivo:overview ?obj .",
            "optional": True,
        },
        "positions": {
            "where": "?subj vivo:relatedBy ?obj .",
            "definition": {
                "where": "?subj a vivo:Position .",
                "fields": {
                    "title": {
                        "where": "?subj rdfs:label ?obj ."
                    },
                    "organization": {
                        "where": "?subj vivo:relates ?obj .",
                        "definition": organization_summmary_definition
                    }
                }
            },
            "optional": True,
            "list": True
        },
        "publications": {
            "where": """
                ?subj vivo:relatedBy ?aship .
                ?aship a vivo:Authorship .
                ?aship vivo:relates ?obj .
            """,
            "definition": document_summary_definition,
            "optional": True,
            "list": True
        }
    }
}
33.221239
80
0.34017
136d5996f3e902f896a0d95201a3a98051d0cce2
1,553
py
Python
apart/search.py
ruslan-ok/ServerApps
541aa12f1933054a12f590ce78544178be374669
[ "MIT" ]
1
2021-06-07T02:14:13.000Z
2021-06-07T02:14:13.000Z
apart/search.py
ruslan-ok/ServerApps
541aa12f1933054a12f590ce78544178be374669
[ "MIT" ]
9
2021-08-14T07:53:47.000Z
2022-03-18T19:07:22.000Z
apart/search.py
ruslan-ok/ServerApps
541aa12f1933054a12f590ce78544178be374669
[ "MIT" ]
null
null
null
from django.db.models import Q

from hier.search import SearchResult
from .models import app_name, Apart, Meter, Bill, Service, Price
45.676471
146
0.696072
136df33d64bf85a2b5e33607c10d78558114c0b0
5,884
py
Python
pyrevolve/experiment_management.py
MRebolle/Battery-Robot
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
[ "Apache-1.1" ]
null
null
null
pyrevolve/experiment_management.py
MRebolle/Battery-Robot
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
[ "Apache-1.1" ]
null
null
null
pyrevolve/experiment_management.py
MRebolle/Battery-Robot
1b97e8c77cf7eff7d5cc7e417b4e5ec97e4011e7
[ "Apache-1.1" ]
null
null
null
import os
import shutil
import numpy as np
from pyrevolve.custom_logging.logger import logger
import sys
41.730496
181
0.653977
136e61a21e482a156b8b710f63c002d29a6f7812
2,136
py
Python
books/model/Instrumentation.py
nudglabs/books-python-wrappers
8844eca8fe681542644a70749b72a6dc4e48c171
[ "MIT" ]
9
2015-04-01T08:59:49.000Z
2022-01-27T01:27:45.000Z
books/model/Instrumentation.py
nudglabs/books-python-wrappers
8844eca8fe681542644a70749b72a6dc4e48c171
[ "MIT" ]
3
2020-05-14T04:22:22.000Z
2021-08-06T11:19:03.000Z
books/model/Instrumentation.py
nudglabs/books-python-wrappers
8844eca8fe681542644a70749b72a6dc4e48c171
[ "MIT" ]
11
2016-04-14T10:59:36.000Z
2020-08-19T13:26:05.000Z
#$Id$
25.428571
67
0.627341
136f314f36b3d7d707a24bb2dc1a76fc985f86a7
1,079
py
Python
DPR/setup.py
sophiaalthammer/parm
ecf2dce5ee225b18e1ed3736a86696cc81e0797c
[ "MIT" ]
18
2022-01-06T13:03:40.000Z
2022-03-29T14:24:23.000Z
DPR/setup.py
k-for-code/parm
ecf2dce5ee225b18e1ed3736a86696cc81e0797c
[ "MIT" ]
1
2022-01-20T08:45:19.000Z
2022-01-24T05:18:40.000Z
DPR/setup.py
k-for-code/parm
ecf2dce5ee225b18e1ed3736a86696cc81e0797c
[ "MIT" ]
4
2021-05-27T08:33:18.000Z
2022-02-20T17:45:40.000Z
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from setuptools import setup

with open("README.md") as f:
    readme = f.read()

setup(
    name="dpr",
    version="0.1.0",
    description="Facebook AI Research Open Domain Q&A Toolkit",
    url="https://github.com/facebookresearch/DPR/",
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    long_description=readme,
    long_description_content_type="text/markdown",
    setup_requires=[
        "setuptools>=18.0",
    ],
    install_requires=[
        "cython",
        "faiss-cpu>=1.6.1",
        "filelock",
        "numpy",
        "regex",
        "torch>=1.2.0",
        "transformers>=3.0.0,<3.1.0",
        "tqdm>=4.27",
        "wget",
        "spacy>=2.1.8",
    ],
)
25.690476
69
0.594995
136f33f06908f09a707c44642cdf5eac1e23e341
2,817
py
Python
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py
BillionsRichard/pycharmWorkspace
709e2681fc6d85ff52fb25717215a365f51073aa
[ "Apache-2.0" ]
null
null
null
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py
BillionsRichard/pycharmWorkspace
709e2681fc6d85ff52fb25717215a365f51073aa
[ "Apache-2.0" ]
null
null
null
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py
BillionsRichard/pycharmWorkspace
709e2681fc6d85ff52fb25717215a365f51073aa
[ "Apache-2.0" ]
null
null
null
# encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: [email protected]
@site:
@software: PyCharm
@time: 2019/9/12 20:37
"""
from pprint import pprint as pp
from operator import itemgetter
import time
from collections import OrderedDict

from hard.smallest_range.srcs.big_2d_list import BIG_LIST_85
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_86

if __name__ == '__main__':
    # NOTE: the Solution class was lost in extraction; a heap-based sketch of
    # smallestRange is given after this record.
    s = Solution()
    nums = [[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]
    # nums = [[10], [11]]
    # nums = [[11,38,83,
    # 84,84,85,88,89,89,92],[28,61,89],[52,77,79,80,81],[21,25,26,26,26,27],[9,83,85,90],[84,85,87],[26,68,70,71],[36,40,41,42,45],[-34,21],[-28,-28,-23,1,13,21,28,37,37,38],[-74,1,2,22,33,35,43,45],[54,96,98,98,99],[43,54,60,65,71,75],[43,46],[50,50,58,67,69],[7,14,15],[78,80,89,89,90],[35,47,63,69,77,92,94]]
    # [-74, 1, 2, 22, 33, 35, 43, 45], [54, 96, 98, 98, 99], [43, 54, 60, 65, 71, 75], [43, 46],
    # [50, 50, 58, 67, 69], [7, 14, 15], [78, 80, 89, 89, 90], [35, 47, 63, 69, 77, 92, 94]]

    nums = BIG_LIST_85
    # nums = BIG_LIST_86
    min_range = s.smallestRange(nums)
    print(min_range)
31.651685
311
0.547746
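The Solution class the driver above expects was lost in extraction. The standard approach to this problem (LeetCode 632, "smallest range covering elements from k lists") keeps one cursor per list in a min-heap and repeatedly advances the minimum; a sketch:

import heapq

class Solution:
    def smallestRange(self, nums):
        # Seed the heap with the first element of every list.
        heap = [(lst[0], i, 0) for i, lst in enumerate(nums)]
        heapq.heapify(heap)
        hi = max(lst[0] for lst in nums)
        best = (-10**9, 10**9)
        while True:
            lo, i, j = heapq.heappop(heap)
            if hi - lo < best[1] - best[0]:
                best = (lo, hi)
            if j + 1 == len(nums[i]):   # one list exhausted: can't shrink more
                return list(best)
            nxt = nums[i][j + 1]
            hi = max(hi, nxt)
            heapq.heappush(heap, (nxt, i, j + 1))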
136ff9b25221822f1896aad526fbb94d03b405cd
4,887
py
Python
pcf/particle/gcp/storage/storage.py
davidyum/Particle-Cloud-Framework
f6325a60a3838f86bd73bf4071438e12f9c68f8d
[ "Apache-2.0" ]
null
null
null
pcf/particle/gcp/storage/storage.py
davidyum/Particle-Cloud-Framework
f6325a60a3838f86bd73bf4071438e12f9c68f8d
[ "Apache-2.0" ]
null
null
null
pcf/particle/gcp/storage/storage.py
davidyum/Particle-Cloud-Framework
f6325a60a3838f86bd73bf4071438e12f9c68f8d
[ "Apache-2.0" ]
null
null
null
from pcf.core.gcp_resource import GCPResource
from pcf.core import State

import logging

from google.cloud import storage
from google.cloud import exceptions

logger = logging.getLogger(__name__)
28.91716
104
0.602415
13700654033637b55b5386791b563e0e83f1b925
498
py
Python
cimcb/utils/smooth.py
CIMCB/cimcb
5d30f80423ed94e1068871b30e465b38d451581a
[ "MIT" ]
5
2020-05-26T23:45:40.000Z
2022-01-13T00:40:14.000Z
cimcb/utils/smooth.py
CIMCB/cimcb
5d30f80423ed94e1068871b30e465b38d451581a
[ "MIT" ]
3
2020-10-20T09:03:18.000Z
2021-11-01T14:22:05.000Z
cimcb/utils/smooth.py
KevinMMendez/cimcb
fe831253b122ed0ff9e33cbd160ef721abee1e38
[ "MIT" ]
4
2020-10-12T07:17:43.000Z
2022-03-28T06:28:44.000Z
import numpy as np
33.2
65
0.588353
13710731fb3b914385bee296e01c654e62f3641b
11,810
py
Python
ezeeai/core/extensions/best_exporter.py
jmarine/ezeeai
091b4ce3bc5794c534084bff3301b15ba8a9be1a
[ "Apache-2.0" ]
19
2019-06-12T03:14:59.000Z
2021-05-31T16:02:53.000Z
ezeeai/core/extensions/best_exporter.py
jmarine/ezeeai
091b4ce3bc5794c534084bff3301b15ba8a9be1a
[ "Apache-2.0" ]
29
2019-06-27T10:15:38.000Z
2022-03-11T23:46:36.000Z
ezeeai/core/extensions/best_exporter.py
jmarine/ezeeai
091b4ce3bc5794c534084bff3301b15ba8a9be1a
[ "Apache-2.0" ]
10
2019-05-14T17:45:44.000Z
2020-08-26T13:25:04.000Z
from __future__ import absolute_import

import abc
import os
import json
import glob
import shutil

from tensorflow.python.estimator import gc
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys

from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging

from tensorflow.python.summary import summary_iterator

from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter


def _verify_compare_fn_args(compare_fn):
  """Verifies compare_fn arguments."""
  args = set(util.fn_args(compare_fn))
  if 'best_eval_result' not in args:
    raise ValueError(
        'compare_fn (%s) must include best_eval_result argument.' % compare_fn)
  if 'current_eval_result' not in args:
    raise ValueError(
        'compare_fn (%s) must include current_eval_result argument.' %
        compare_fn)
  non_valid_args = list(args - set(['best_eval_result', 'current_eval_result']))
  if non_valid_args:
    raise ValueError('compare_fn (%s) has following not expected args: %s' %
                     (compare_fn, non_valid_args))


def _loss_smaller(best_eval_result, current_eval_result):
  """Compares two evaluation results and returns true if the 2nd one is smaller.

  Both evaluation results should have the values for MetricKeys.LOSS, which are
  used for comparison.

  Args:
    best_eval_result: best eval metrics.
    current_eval_result: current eval metrics.

  Returns:
    True if the loss of current_eval_result is smaller; otherwise, False.

  Raises:
    ValueError: If input eval result is None or no loss is available.
  """
  default_key = metric_keys.MetricKeys.LOSS
  if not best_eval_result or default_key not in best_eval_result:
    raise ValueError(
        'best_eval_result cannot be empty or no loss is found in it.')

  if not current_eval_result or default_key not in current_eval_result:
    raise ValueError(
        'current_eval_result cannot be empty or no loss is found in it.')

  return best_eval_result[default_key] > current_eval_result[default_key]
44.398496
98
0.637934
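_verify_compare_fn_args above enforces the two-argument protocol for comparison callables. A compare_fn that keeps the model with the higher accuracy, rather than the lower loss of _loss_smaller, would look like this sketch (the 'accuracy' metric key is an assumption about the estimator's eval metrics):

def _accuracy_larger(best_eval_result, current_eval_result):
  # Mirrors _loss_smaller, but prefers the larger 'accuracy' metric.
  key = 'accuracy'
  if not best_eval_result or key not in best_eval_result:
    raise ValueError('best_eval_result cannot be empty or has no accuracy.')
  if not current_eval_result or key not in current_eval_result:
    raise ValueError('current_eval_result cannot be empty or has no accuracy.')
  return best_eval_result[key] < current_eval_result[key]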
13716a3c1d81e757e4d87853e2f35128b1e83ff1
5,329
py
Python
src/pkgcore/restrictions/restriction.py
mgorny/pkgcore
ab4a718aa1626f4edeb385383f5595a1e262b0dc
[ "BSD-3-Clause" ]
null
null
null
src/pkgcore/restrictions/restriction.py
mgorny/pkgcore
ab4a718aa1626f4edeb385383f5595a1e262b0dc
[ "BSD-3-Clause" ]
null
null
null
src/pkgcore/restrictions/restriction.py
mgorny/pkgcore
ab4a718aa1626f4edeb385383f5595a1e262b0dc
[ "BSD-3-Clause" ]
null
null
null
# Copyright: 2005-2012 Brian Harring <[email protected]>
# Copyright: 2006 Marien Zwart <[email protected]>
# License: BSD/GPL2

"""
base restriction class
"""

from functools import partial

from snakeoil import caching, klass
from snakeoil.currying import pretty_docs


def curry_node_type(cls, node_type, extradoc=None):
    """Helper function for creating restrictions of a certain type.

    This uses :obj:`partial` to pass a node_type to the wrapped class,
    and extends the docstring.

    :param cls: callable (usually a class) that is wrapped.
    :param node_type: value passed as node_type.
    :param extradoc: addition to the docstring. Defaults to
        "Automatically set to %s type." % node_type

    :return: a wrapped callable.
    """
    if extradoc is None:
        extradoc = "Automatically set to %s type." % (node_type,)
    doc = cls.__doc__
    result = partial(cls, node_type=node_type)
    if doc is None:
        doc = ''
    else:
        # do this so indentation on pydoc __doc__ is sane
        doc = "\n".join(line.lstrip() for line in doc.split("\n")) + "\n"
    doc += extradoc
    return pretty_docs(result, doc)


value_type = "values"
package_type = "package"
valid_types = (value_type, package_type)
27.611399
78
0.627698
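A small sketch of what curry_node_type produces: a partial that pre-binds node_type and carries an extended docstring. The restriction class here is a stand-in, not one of pkgcore's, and the __doc__ check assumes snakeoil's pretty_docs attaches the combined docstring to the partial:

class FakeRestriction(object):
    """Toy restriction."""

    def __init__(self, node_type=None):
        self.node_type = node_type


PackageRestriction = curry_node_type(FakeRestriction, package_type)
r = PackageRestriction()
assert r.node_type == "package"
print(PackageRestriction.__doc__)  # ends with "Automatically set to package type."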
1372983b8db0190678e8ab88123c2241d2b934f3
797
py
Python
keylime/migrations/versions/8da20383f6e1_extend_ip_field.py
kkaarreell/keylime
e12658bb6dc945b694e298b8ac337a204ab86ed2
[ "Apache-2.0" ]
18
2016-10-19T13:57:32.000Z
2019-01-12T21:35:43.000Z
keylime/migrations/versions/8da20383f6e1_extend_ip_field.py
kkaarreell/keylime
e12658bb6dc945b694e298b8ac337a204ab86ed2
[ "Apache-2.0" ]
72
2019-01-24T10:12:59.000Z
2019-04-17T11:07:16.000Z
keylime/migrations/versions/8da20383f6e1_extend_ip_field.py
kkaarreell/keylime
e12658bb6dc945b694e298b8ac337a204ab86ed2
[ "Apache-2.0" ]
10
2017-03-27T20:58:08.000Z
2018-07-30T12:59:27.000Z
"""extend_ip_field Revision ID: 8da20383f6e1 Revises: eeb702f77d7d Create Date: 2021-01-14 10:50:56.275257 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "8da20383f6e1" down_revision = "eeb702f77d7d" branch_labels = None depends_on = None
18.534884
105
0.72522
1372bb8d33de36c039935d2eac285248cdacdfb7
301
py
Python
token_train/quickdemo(1)(1).py
Tatsuya26/processamento_de_linguagens
e89ab8461bcf3264a79f10b7ebc2208eff271c6c
[ "MIT" ]
null
null
null
token_train/quickdemo(1)(1).py
Tatsuya26/processamento_de_linguagens
e89ab8461bcf3264a79f10b7ebc2208eff271c6c
[ "MIT" ]
null
null
null
token_train/quickdemo(1)(1).py
Tatsuya26/processamento_de_linguagens
e89ab8461bcf3264a79f10b7ebc2208eff271c6c
[ "MIT" ]
null
null
null
import ply.lex as lex

tokens = ["NUM", "OPERADORES"]

# Token regexes (raw strings avoid invalid-escape warnings in Python 3;
# note the '|' inside the character class is matched literally).
t_NUM = r'\d+'
t_OPERADORES = r'[+|*|-]'
t_ignore = '\n\t '

lexer = lex.lex()

# 1+2 1-2 1*2
# ola mundo ("hello world")
import sys

for line in sys.stdin:
    lexer.input(line)
    for tok in lexer:
        print(tok)
13.086957
28
0.584718
1373c11e16cd5b0b44349e8fb881270741a3ce25
1,657
py
Python
ucsrb/migrations/0013_auto_20180710_2040.py
Ecotrust/ucsrb
29d97cf1f21537aaf24f38e7dedc7c8cfccf1f12
[ "MIT" ]
1
2018-07-31T00:58:30.000Z
2018-07-31T00:58:30.000Z
ucsrb/migrations/0013_auto_20180710_2040.py
Ecotrust/ucsrb
29d97cf1f21537aaf24f38e7dedc7c8cfccf1f12
[ "MIT" ]
264
2017-10-24T23:54:52.000Z
2021-10-16T15:40:47.000Z
ucsrb/migrations/0013_auto_20180710_2040.py
Ecotrust/ucsrb
29d97cf1f21537aaf24f38e7dedc7c8cfccf1f12
[ "MIT" ]
1
2019-07-16T06:37:45.000Z
2019-07-16T06:37:45.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-10 20:40
from __future__ import unicode_literals

from django.db import migrations, models
32.490196
72
0.620398
137402a725b61eaf0c95cb37df89ef2b691ce663
2,015
py
Python
src/ctc/protocols/fei_utils/analytics/payload_crud.py
fei-protocol/checkthechain
ec838f3d0d44af228f45394d9ba8d8eb7f677520
[ "MIT" ]
94
2022-02-15T19:34:49.000Z
2022-03-26T19:26:22.000Z
src/ctc/protocols/fei_utils/analytics/payload_crud.py
fei-protocol/checkthechain
ec838f3d0d44af228f45394d9ba8d8eb7f677520
[ "MIT" ]
7
2022-03-03T02:58:47.000Z
2022-03-11T18:41:05.000Z
src/ctc/protocols/fei_utils/analytics/payload_crud.py
fei-protocol/checkthechain
ec838f3d0d44af228f45394d9ba8d8eb7f677520
[ "MIT" ]
7
2022-02-15T17:53:07.000Z
2022-03-17T19:14:17.000Z
from __future__ import annotations

import typing

from ctc import spec

from . import timestamp_crud
from . import metric_crud
from . import analytics_spec


# def update_payload(
#     timescale: analytics_spec.Timescale,
#     old_payload: analytics_spec.AnalyticsPayload,
# ) -> analytics_spec.AnalyticsPayload:

#     new_timestamps = get_new_timestamps(
#         timescale=timescale,
#         old_payload=old_payload,
#     )

#     new_blocks = get_new_blocks(
#         new_timestamps=new_timestamps,
#         old_payload=old_payload,
#     )

#     new_metrics = get_metrics(blocks=new_blocks)

#     return combine_new_data(
#         old_payload=old_payload,
#         new_metrics=new_metrics,
#     )
27.22973
69
0.658065
1374addc1e8f402af9273db13845fe70ea5229f1
18,118
py
Python
research/video_prediction/prediction_model.py
mbz/models
98dcd8dbcb1027e4b22f79113018df30da4b8590
[ "Apache-2.0" ]
1
2021-10-05T13:34:44.000Z
2021-10-05T13:34:44.000Z
research/video_prediction/prediction_model.py
mbz/models
98dcd8dbcb1027e4b22f79113018df30da4b8590
[ "Apache-2.0" ]
null
null
null
research/video_prediction/prediction_model.py
mbz/models
98dcd8dbcb1027e4b22f79113018df30da4b8590
[ "Apache-2.0" ]
1
2020-11-14T04:15:00.000Z
2020-11-14T04:15:00.000Z
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Model architecture for predictive model, including CDNA, DNA, and STP."""

import numpy as np
import tensorflow as tf

import tensorflow.contrib.slim as slim
from tensorflow.python.platform import flags
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell

FLAGS = flags.FLAGS

# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12

# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5


def kl_divergence(mu, log_sigma):
  """KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1).

  Args:
    mu: mu parameter of the distribution.
    log_sigma: log(sigma) parameter of the distribution.
  Returns:
    the KL loss.
  """

  return -.5 * tf.reduce_sum(
      1. + log_sigma - tf.square(mu) - tf.exp(log_sigma), axis=1)


def construct_latent_tower(images):
  """Builds convolutional latent tower for stochastic model.

  At training time this tower generates a latent distribution (mean and std)
  conditioned on the entire video. This latent variable will be fed to the
  main tower as an extra variable to be used for future frames prediction.
  At inference time, the tower is disabled and only returns latents sampled
  from N(0,1).
  If the multi_latent flag is on, a different latent for every timestep
  would be generated.

  Args:
    images: tensor of ground truth image sequences
  Returns:
    latent_mean: predicted latent mean
    latent_std: predicted latent standard deviation
    latent_loss: loss of the latent tower
    samples: random samples sampled from standard gaussian
  """

  with slim.arg_scope([slim.conv2d], reuse=False):
    stacked_images = tf.concat(images, 3)

    latent_enc1 = slim.conv2d(
        stacked_images,
        32, [3, 3],
        stride=2,
        scope='latent_conv1',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm1'})

    latent_enc2 = slim.conv2d(
        latent_enc1,
        64, [3, 3],
        stride=2,
        scope='latent_conv2',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm2'})

    latent_enc3 = slim.conv2d(
        latent_enc2,
        64, [3, 3],
        stride=1,
        scope='latent_conv3',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm3'})

    latent_mean = slim.conv2d(
        latent_enc3,
        FLAGS.latent_channels, [3, 3],
        stride=2,
        activation_fn=None,
        scope='latent_mean',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm_mean'})

    latent_std = slim.conv2d(
        latent_enc3,
        FLAGS.latent_channels, [3, 3],
        stride=2,
        scope='latent_std',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_std_norm'})

    latent_std += FLAGS.latent_std_min

    divergence = kl_divergence(latent_mean, latent_std)
    latent_loss = tf.reduce_mean(divergence)

  if FLAGS.multi_latent:
    # timestep x batch_size x latent_size
    samples = tf.random_normal(
        [FLAGS.sequence_length - 1] + latent_mean.shape, 0, 1,
        dtype=tf.float32)
  else:
    # batch_size x latent_size
    samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32)

  if FLAGS.inference_time:
    # No latent tower at inference time, just standard gaussian.
    return None, None, None, samples
  else:
    return latent_mean, latent_std, latent_loss, samples


def construct_model(images,
                    actions=None,
                    states=None,
                    iter_num=-1.0,
                    k=-1,
                    use_state=True,
                    num_masks=10,
                    stp=False,
                    cdna=True,
                    dna=False,
                    context_frames=2):
  """Build convolutional lstm video predictor using STP, CDNA, or DNA.

  Args:
    images: tensor of ground truth image sequences
    actions: tensor of action sequences
    states: tensor of ground truth state sequences
    iter_num: tensor of the current training iteration (for sched. sampling)
    k: constant used for scheduled sampling. -1 to feed in own prediction.
    use_state: True to include state and action in prediction
    num_masks: the number of different pixel motion predictions (and
               the number of masks for each of those predictions)
    stp: True to use Spatial Transformer Predictor (STP)
    cdna: True to use Convoluational Dynamic Neural Advection (CDNA)
    dna: True to use Dynamic Neural Advection (DNA)
    context_frames: number of ground truth frames to pass in before
                    feeding in own predictions
  Returns:
    gen_images: predicted future image frames
    gen_states: predicted future states

  Raises:
    ValueError: if more than one network option specified or more than 1 mask
    specified for DNA model.
  """
  # Each image is being used twice, in latent tower and main tower.
  # This is to make sure we are using the *same* image for both, ...
  # ... given how TF queues work.
  images = [tf.identity(image) for image in images]

  if stp + cdna + dna != 1:
    raise ValueError('More than one, or no network option specified.')
  batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
  lstm_func = basic_conv_lstm_cell

  # Generated robot states and images.
  gen_states, gen_images = [], []
  current_state = states[0]

  if k == -1:
    feedself = True
  else:
    # Scheduled sampling:
    # Calculate number of ground-truth frames to pass in.
    num_ground_truth = tf.to_int32(
        tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
    feedself = False

  # LSTM state sizes and states.
  lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
  lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
  lstm_state5, lstm_state6, lstm_state7 = None, None, None

  # Latent tower
  latent_loss = 0.0
  if FLAGS.stochastic_model:
    latent_tower_outputs = construct_latent_tower(images)
    latent_mean, latent_std, latent_loss, samples = latent_tower_outputs

  # Main tower
  for image, action in zip(images[:-1], actions[:-1]):
    # Reuse variables after the first timestep.
    reuse = bool(gen_images)

    done_warm_start = len(gen_images) > context_frames - 1
    with slim.arg_scope(
        [lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
         tf_layers.layer_norm, slim.layers.conv2d_transpose],
        reuse=reuse):

      if feedself and done_warm_start:
        # Feed in generated image.
        prev_image = gen_images[-1]
      elif done_warm_start:
        # Scheduled sampling
        prev_image = scheduled_sample(image, gen_images[-1], batch_size,
                                      num_ground_truth)
      else:
        # Always feed in ground_truth
        prev_image = image

      # Predicted state is always fed back in
      state_action = tf.concat(axis=1, values=[action, current_state])

      enc0 = slim.layers.conv2d(
          prev_image,
          32, [5, 5],
          stride=2,
          scope='scale1_conv1',
          normalizer_fn=tf_layers.layer_norm,
          normalizer_params={'scope': 'layer_norm1'})

      hidden1, lstm_state1 = lstm_func(
          enc0, lstm_state1, lstm_size[0], scope='state1')
      hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
      hidden2, lstm_state2 = lstm_func(
          hidden1, lstm_state2, lstm_size[1], scope='state2')
      hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
      enc1 = slim.layers.conv2d(
          hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')

      hidden3, lstm_state3 = lstm_func(
          enc1, lstm_state3, lstm_size[2], scope='state3')
      hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
      hidden4, lstm_state4 = lstm_func(
          hidden3, lstm_state4, lstm_size[3], scope='state4')
      hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
      enc2 = slim.layers.conv2d(
          hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')

      # Pass in state and action.
      smear = tf.reshape(
          state_action,
          [int(batch_size), 1, 1, int(state_action.get_shape()[1])])
      smear = tf.tile(
          smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
      if use_state:
        enc2 = tf.concat(axis=3, values=[enc2, smear])

      # Setup latent
      if FLAGS.stochastic_model:
        latent = samples
        if FLAGS.multi_latent:
          # NOTE: 'timestep' is not defined in the captured snippet; in the
          # original it would index the per-timestep latent samples.
          latent = samples[timestep]
        if not FLAGS.inference_time:
          latent = tf.cond(
              iter_num < FLAGS.num_iterations_1st_stage,
              lambda: tf.identity(latent),
              lambda: latent_mean + tf.exp(latent_std / 2.0) * latent)
        with tf.control_dependencies([latent]):
          enc2 = tf.concat([enc2, latent], 3)

      enc3 = slim.layers.conv2d(
          enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')

      hidden5, lstm_state5 = lstm_func(
          enc3, lstm_state5, lstm_size[4], scope='state5')  # last 8x8
      hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
      enc4 = slim.layers.conv2d_transpose(
          hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')

      hidden6, lstm_state6 = lstm_func(
          enc4, lstm_state6, lstm_size[5], scope='state6')  # 16x16
      hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
      # Skip connection.
hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16 enc5 = slim.layers.conv2d_transpose( hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2') hidden7, lstm_state7 = lstm_func( enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32 hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8') # Skip connection. hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32 enc6 = slim.layers.conv2d_transpose( hidden7, hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None, normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm9'}) if dna: # Using largest hidden state for predicting untied conv kernels. enc7 = slim.layers.conv2d_transpose( enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None) else: # Using largest hidden state for predicting a new image layer. enc7 = slim.layers.conv2d_transpose( enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None) # This allows the network to also generate one image from scratch, # which is useful when regions of the image become unoccluded. transformed = [tf.nn.sigmoid(enc7)] if stp: stp_input0 = tf.reshape(hidden5, [int(batch_size), -1]) stp_input1 = slim.layers.fully_connected( stp_input0, 100, scope='fc_stp') transformed += stp_transformation(prev_image, stp_input1, num_masks) elif cdna: cdna_input = tf.reshape(hidden5, [int(batch_size), -1]) transformed += cdna_transformation(prev_image, cdna_input, num_masks, int(color_channels)) elif dna: # Only one mask is supported (more should be unnecessary). if num_masks != 1: raise ValueError('Only one mask is supported for DNA model.') transformed = [dna_transformation(prev_image, enc7)] masks = slim.layers.conv2d_transpose( enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None) masks = tf.reshape( tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])), [int(batch_size), int(img_height), int(img_width), num_masks + 1]) mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks) output = mask_list[0] * prev_image for layer, mask in zip(transformed, mask_list[1:]): output += layer * mask gen_images.append(output) current_state = slim.layers.fully_connected( state_action, int(current_state.get_shape()[1]), scope='state_pred', activation_fn=None) gen_states.append(current_state) return gen_images, gen_states, latent_loss ## Utility functions def stp_transformation(prev_image, stp_input, num_masks): """Apply spatial transformer predictor (STP) to previous image. Args: prev_image: previous image to be transformed. stp_input: hidden layer to be used for computing STN parameters. num_masks: number of masks and hence the number of STP transformations. Returns: List of images transformed by the predicted STP parameters. """ # Only import spatial transformer if needed. from spatial_transformer import transformer identity_params = tf.convert_to_tensor( np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32)) transformed = [] for i in range(num_masks - 1): params = slim.layers.fully_connected( stp_input, 6, scope='stp_params' + str(i), activation_fn=None) + identity_params transformed.append(transformer(prev_image, params)) return transformed def cdna_transformation(prev_image, cdna_input, num_masks, color_channels): """Apply convolutional dynamic neural advection to previous image. Args: prev_image: previous image to be transformed. cdna_input: hidden lyaer to be used for computing CDNA kernels. num_masks: the number of masks and hence the number of CDNA transformations. 
color_channels: the number of color channels in the images. Returns: List of images transformed by the predicted CDNA kernels. """ batch_size = int(cdna_input.get_shape()[0]) height = int(prev_image.get_shape()[1]) width = int(prev_image.get_shape()[2]) # Predict kernels using linear function of last hidden layer. cdna_kerns = slim.layers.fully_connected( cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None) # Reshape and normalize. cdna_kerns = tf.reshape( cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks]) cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True) cdna_kerns /= norm_factor # Treat the color channel dimension as the batch dimension since the same # transformation is applied to each color channel. # Treat the batch dimension as the channel dimension so that # depthwise_conv2d can apply a different transformation to each sample. cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3]) cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks]) # Swap the batch and channel dimensions. prev_image = tf.transpose(prev_image, [3, 1, 2, 0]) # Transform image. transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME') # Transpose the dimensions to where they belong. transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks]) transformed = tf.transpose(transformed, [3, 1, 2, 0, 4]) transformed = tf.unstack(transformed, axis=-1) return transformed def dna_transformation(prev_image, dna_input): """Apply dynamic neural advection to previous image. Args: prev_image: previous image to be transformed. dna_input: hidden lyaer to be used for computing DNA transformation. Returns: List of images transformed by the predicted CDNA kernels. """ # Construct translated images. prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]]) image_height = int(prev_image.get_shape()[1]) image_width = int(prev_image.get_shape()[2]) inputs = [] for xkern in range(DNA_KERN_SIZE): for ykern in range(DNA_KERN_SIZE): inputs.append( tf.expand_dims( tf.slice(prev_image_pad, [0, xkern, ykern, 0], [-1, image_height, image_width, -1]), [3])) inputs = tf.concat(axis=3, values=inputs) # Normalize channels to 1. kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT kernel = tf.expand_dims( kernel / tf.reduce_sum( kernel, [3], keep_dims=True), [4]) return tf.reduce_sum(kernel * inputs, [3], keep_dims=False) def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth): """Sample batch with specified mix of ground truth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size num_ground_truth: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x. """ idx = tf.random_shuffle(tf.range(int(batch_size))) ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth)) generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size))) ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx) generated_examps = tf.gather(generated_x, generated_idx) return tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps])
37.903766
95
0.669886
137575ac656962b0f9d67245530a471421c965ac
1,657
bzl
Python
junit5/rules.bzl
prashantsharma04/bazel_java_rules
4f80fbe70e1778aa8e3e0ee8aa2f1efc3e44a462
[ "Apache-2.0" ]
1
2020-10-22T06:44:10.000Z
2020-10-22T06:44:10.000Z
junit5/rules.bzl
prashantsharma04/bazel_java_rules
4f80fbe70e1778aa8e3e0ee8aa2f1efc3e44a462
[ "Apache-2.0" ]
5
2020-06-01T22:33:59.000Z
2020-11-01T17:03:06.000Z
junit5/rules.bzl
prashantsharma04/bazel_java_rules
4f80fbe70e1778aa8e3e0ee8aa2f1efc3e44a462
[ "Apache-2.0" ]
1
2020-08-17T07:42:21.000Z
2020-08-17T07:42:21.000Z
load("@rules_jvm_external//:defs.bzl", "artifact") # For more information see # - https://github.com/bmuschko/bazel-examples/blob/master/java/junit5-test/BUILD # - https://github.com/salesforce/bazel-maven-proxy/tree/master/tools/junit5 # - https://github.com/junit-team/junit5-samples/tree/master/junit5-jupiter-starter-bazel def junit5_test(name, srcs, test_package, resources = [], deps = [], runtime_deps = [], **kwargs): """JUnit runner macro""" FILTER_KWARGS = [ "main_class", "use_testrunner", "args", ] for arg in FILTER_KWARGS: if arg in kwargs.keys(): kwargs.pop(arg) junit_console_args = [] if test_package: junit_console_args += ["--select-package", test_package] else: fail("must specify 'test_package'") native.java_test( name = name, srcs = srcs, use_testrunner = False, main_class = "org.junit.platform.console.ConsoleLauncher", args = junit_console_args, deps = deps + [ artifact("org.junit.jupiter:junit-jupiter-api"), artifact("org.junit.jupiter:junit-jupiter-params"), artifact("org.junit.jupiter:junit-jupiter-engine"), artifact("org.hamcrest:hamcrest-library"), artifact("org.hamcrest:hamcrest-core"), artifact("org.hamcrest:hamcrest"), artifact("org.mockito:mockito-core"), ], visibility = ["//java:__subpackages__"], resources = resources, runtime_deps = runtime_deps + [ artifact("org.junit.platform:junit-platform-console"), ], **kwargs )
35.255319
98
0.616174
1376113ee039ab051c772dba764cfe52a310f45d
625
py
Python
tests/mocked_carla.py
fangedward/pylot
a742b3789ee8e7fa2d692ae22bda1e2960ed9345
[ "Apache-2.0" ]
null
null
null
tests/mocked_carla.py
fangedward/pylot
a742b3789ee8e7fa2d692ae22bda1e2960ed9345
[ "Apache-2.0" ]
null
null
null
tests/mocked_carla.py
fangedward/pylot
a742b3789ee8e7fa2d692ae22bda1e2960ed9345
[ "Apache-2.0" ]
null
null
null
# This module provides mocked versions of classes and functions provided
# by Carla in our runtime environment.
20.833333
72
0.5776
1377e6c9502e17891e25610fab3c369d6bcdf674
404
py
Python
rgb_to_cmyk.py
Zweizack/fuzzy-rainbow
f69f7eb59971d28a9093a03c1911b41e23cddf2a
[ "MIT" ]
null
null
null
rgb_to_cmyk.py
Zweizack/fuzzy-rainbow
f69f7eb59971d28a9093a03c1911b41e23cddf2a
[ "MIT" ]
null
null
null
rgb_to_cmyk.py
Zweizack/fuzzy-rainbow
f69f7eb59971d28a9093a03c1911b41e23cddf2a
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

ee = '\033[1m'
green = '\033[32m'
yellow = '\033[33m'
cyan = '\033[36m'
line = cyan+'-' * 0x2D

print(ee+line)

R,G,B = [float(X) / 0xFF for X in input(f'{yellow}RGB: {green}').split()]

K = 1-max(R,G,B)
C,M,Y = [round(float((1-X-K)/(1-K) * 0x64),1) for X in [R,G,B]]
K = round(K * 0x64,1)

print(f'{yellow}CMYK: {green}{C}%, {M}%, {Y}%, {K}%')
print(line)
21.263158
73
0.542079
1377f0cfac03b437ee48b39fc8008df7e5dd358b
4,720
py
Python
docs/updatedoc.py
JukeboxPipeline/jukedj
d4159961c819c26792a278981ee68106ee15f3f3
[ "BSD-3-Clause" ]
2
2015-01-22T17:39:05.000Z
2015-02-09T16:47:15.000Z
docs/updatedoc.py
JukeboxPipeline/jukedj
d4159961c819c26792a278981ee68106ee15f3f3
[ "BSD-3-Clause" ]
3
2020-02-12T00:24:58.000Z
2021-06-10T20:05:03.000Z
docs/updatedoc.py
JukeboxPipeline/jukeboxmaya
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
"""Builds the documentation. First it runs gendoc to create rst files
for the source code. Then it runs sphinx make.

.. Warning:: This will delete the content of the output directory first!
             So you might lose data. You can use updatedoc.py -nod.

Usage, just call::

  updatedoc.py -h
"""
import argparse
import os
import shutil
import sys

import gendoc

thisdir = os.path.abspath(os.path.dirname(__file__))


def setup_argparse():
    """Sets up the argument parser and returns it

    :returns: the parser
    :rtype: :class:`argparse.ArgumentParser`
    :raises: None
    """
    parser = argparse.ArgumentParser(
        description="Builds the documentation. First it runs gendoc to create rst files\
        for the source code. Then it runs sphinx make.\
        WARNING: this will delete the contents of the output dirs. You can use -nod.")
    ipath = os.path.join(thisdir, '../src')
    ipath = os.path.abspath(ipath)
    idefault = [ipath]
    parser.add_argument('-i', '--input', nargs='+', default=idefault,
                        help='list of input directories. gendoc is called for every\
                        source dir.\
                        Default is \'%s\'.' % ', '.join(idefault))
    opath = os.path.join(thisdir, 'reference')
    opath = os.path.abspath(opath)
    odefault = [opath]
    parser.add_argument('-o', '--output', nargs='+', default=odefault,
                        help='list of output directories. if you have multiple source\
                        directories, the corresponding output directory is used.\
                        if there are less dirs than for source, the last output dir\
                        is used for the remaining source dirs.\
                        WARNING: the output directories are emptied by default. See -nod.\
                        Default is \'%s\'.' % ', '.join(odefault))
    gadefault = ['-T', '-f', '-e', '-o']
    parser.add_argument('-ga', '--gendocargs', nargs='*', default=gadefault,
                        help="list of arguments to pass to gendoc. use -gh for info.\
                        Default is \'%s\'" % ', '.join(gadefault))
    parser.add_argument('-nod', '--nodelete', action='store_true',
                        help='Do not empty the output directories first.')
    parser.add_argument('-gh', '--gendochelp', action='store_true',
                        help='print the help for gendoc and exit')
    return parser


def prepare_dir(directory, delete=True):
    """Create apidoc dir, delete contents if delete is True.

    :param directory: the apidoc directory. you can use relative paths here
    :type directory: str
    :param delete: if True, deletes the contents of apidoc. This acts like an override switch.
    :type delete: bool
    :returns: None
    :rtype: None
    :raises: None
    """
    if os.path.exists(directory):
        if delete:
            assert directory != thisdir, 'Trying to delete docs! Specify other output dir!'
            print 'Deleting %s' % directory
            shutil.rmtree(directory)
            print 'Creating %s' % directory
            os.mkdir(directory)
    else:
        print 'Creating %s' % directory
        os.mkdir(directory)


def run_gendoc(source, dest, args):
    """Starts gendoc which reads source and creates rst files in dest with the given args.

    :param source: The python source directory for gendoc. Can be a relative path.
    :type source: str
    :param dest: The destination for the rst files. Can be a relative path.
    :type dest: str
    :param args: Arguments for gendoc. See gendoc for more information.
    :type args: list
    :returns: None
    :rtype: None
    :raises: SystemExit
    """
    args.insert(0, 'gendoc.py')
    args.append(dest)
    args.append(source)
    print 'Running gendoc.main with: %s' % args
    gendoc.main(args)


def main(argv=sys.argv[1:]):
    """Parse commandline arguments and run the tool

    :param argv: the commandline arguments.
    :type argv: list
    :returns: None
    :rtype: None
    :raises: None
    """
    parser = setup_argparse()
    args = parser.parse_args(argv)
    if args.gendochelp:
        sys.argv[0] = 'gendoc.py'
        genparser = gendoc.setup_parser()
        genparser.print_help()
        sys.exit(0)
    print 'Preparing output directories'
    print '='*80
    for odir in args.output:
        prepare_dir(odir, not args.nodelete)
    print '\nRunning gendoc'
    print '='*80
    for i, idir in enumerate(args.input):
        if i >= len(args.output):
            odir = args.output[-1]
        else:
            odir = args.output[i]
        run_gendoc(idir, odir, args.gendocargs)


if __name__ == '__main__':
    main()
35.488722
115
0.613559
13783bd8e2a248d44492c03b9013e0d6c16cfd22
478
py
Python
sort/selectionsort.py
vitormrts/sorting-algorithms
5571ce522a7fd33f976fa05b264ed2c253c221b3
[ "MIT" ]
null
null
null
sort/selectionsort.py
vitormrts/sorting-algorithms
5571ce522a7fd33f976fa05b264ed2c253c221b3
[ "MIT" ]
null
null
null
sort/selectionsort.py
vitormrts/sorting-algorithms
5571ce522a7fd33f976fa05b264ed2c253c221b3
[ "MIT" ]
null
null
null
# 1 + (n-1)*[3 + X] = 1 + 3*(n-1) + X*(n-1) = 1 + 3*(n-1) + (n^2 + n - 2)/2
#   = (1 - 3 - 1) + (3n + n/2) + (n^2/2)
# The complexity is O(n^2)
36.769231
86
0.464435
1379138cdd6c153ab5075c9fd6e443c52181da72
4,618
py
Python
BridgeOptimizer/scriptBuilder/ScriptBuilderBoundaryConditions.py
manuel1618/bridgeOptimizer
273bbf27b2c6273e4aaca55debbd9a10bebf7042
[ "MIT" ]
1
2022-01-20T16:30:04.000Z
2022-01-20T16:30:04.000Z
BridgeOptimizer/scriptBuilder/ScriptBuilderBoundaryConditions.py
manuel1618/bridgeOptimizer
273bbf27b2c6273e4aaca55debbd9a10bebf7042
[ "MIT" ]
13
2022-01-07T14:07:15.000Z
2022-01-29T19:42:48.000Z
BridgeOptimizer/scriptBuilder/ScriptBuilderBoundaryConditions.py
manuel1618/bridgeOptimizer
273bbf27b2c6273e4aaca55debbd9a10bebf7042
[ "MIT" ]
null
null
null
import os
from typing import List, Tuple

from BridgeOptimizer.datastructure.hypermesh.LoadCollector import LoadCollector
from BridgeOptimizer.datastructure.hypermesh.LoadStep import LoadStep
from BridgeOptimizer.datastructure.hypermesh.Force import Force
from BridgeOptimizer.datastructure.hypermesh.SPC import SPC
47.122449
159
0.622347
137960c813f4163613ba4247cf3f11614175c045
320
py
Python
Lekcija08/script01.py
islamspahic/python-uup
ea7c9c655ad8e678bca5ee52138836732266799f
[ "Apache-2.0" ]
null
null
null
Lekcija08/script01.py
islamspahic/python-uup
ea7c9c655ad8e678bca5ee52138836732266799f
[ "Apache-2.0" ]
null
null
null
Lekcija08/script01.py
islamspahic/python-uup
ea7c9c655ad8e678bca5ee52138836732266799f
[ "Apache-2.0" ]
null
null
null
tajniBroj = 51
broj = 2

while tajniBroj != broj:
    broj = int(input("Pogodite tajni broj: "))
    if tajniBroj == broj:
        print("Pogodak!")
    elif tajniBroj < broj:
        print("Tajni broj je manji od tog broja.")
    else:
        print("Tajni broj je veci od tog broja.")

print("Kraj programa")
21.333333
50
0.590625
1379b64de3a90f72d35d03219b56d72544b5e73a
2,806
py
Python
tests/algorithms/memory/test_cmac.py
FrostByte266/neupy
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
[ "MIT" ]
801
2015-09-23T09:24:47.000Z
2022-03-29T19:19:03.000Z
tests/algorithms/memory/test_cmac.py
FrostByte266/neupy
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
[ "MIT" ]
277
2015-09-22T19:48:50.000Z
2022-03-11T23:25:32.000Z
tests/algorithms/memory/test_cmac.py
FrostByte266/neupy
4b7127e5e4178b0cce023ba36542f5ad3f1d798c
[ "MIT" ]
194
2015-09-23T15:03:57.000Z
2022-03-31T13:54:46.000Z
import numpy as np
from sklearn import metrics

from neupy import algorithms

from base import BaseTestCase
30.172043
73
0.599786
137a3688b49f0ea26253687c4f9e076efa9114c9
3,075
py
Python
src/ggrc_workflows/models/task_group_object.py
Smotko/ggrc-core
b3abb58b24e7559960d71a94ba79c75539e7fe29
[ "Apache-2.0" ]
null
null
null
src/ggrc_workflows/models/task_group_object.py
Smotko/ggrc-core
b3abb58b24e7559960d71a94ba79c75539e7fe29
[ "Apache-2.0" ]
12
2015-01-08T14:50:19.000Z
2017-11-29T19:37:53.000Z
src/ggrc_workflows/models/task_group_object.py
Smotko/ggrc-core
b3abb58b24e7559960d71a94ba79c75539e7fe29
[ "Apache-2.0" ]
1
2015-01-08T13:25:09.000Z
2015-01-08T13:25:09.000Z
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]

from sqlalchemy.ext.associationproxy import association_proxy

from ggrc import db
from ggrc.models.mixins import Mapping
from ggrc.models.mixins import Timeboxed
from ggrc.models.reflection import PublishOnly
29.285714
78
0.690407
137bf77da0a4c318b41b184deddd1c0849f9d010
11,196
py
Python
verification/tb_template.py
ahmednofal/DFFRAM
7d7ebc28befe12ec3f232c0d2f5b8ea786227d45
[ "Apache-2.0" ]
null
null
null
verification/tb_template.py
ahmednofal/DFFRAM
7d7ebc28befe12ec3f232c0d2f5b8ea786227d45
[ "Apache-2.0" ]
null
null
null
verification/tb_template.py
ahmednofal/DFFRAM
7d7ebc28befe12ec3f232c0d2f5b8ea786227d45
[ "Apache-2.0" ]
null
null
null
# Copyright 2020-2021 The American University in Cairo and the Cloud V Project. # # This file is part of the DFFRAM Memory Compiler. # See https://github.com/Cloud-V/DFFRAM for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. RAM_instantiation = """ /* An auto generated testbench to verify RAM{word_num}x{word_size} Authors: Mohamed Shalan ([email protected]) Ahmed Nofal ([email protected]) */ `define VERBOSE_1 `define VERBOSE_2 `define UNIT_DELAY #1 `define USE_LATCH 1 `define SIZE {word_size}/8 //`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/primitives.v" //`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/sky130_fd_sc_hd.v" // // Temporary override: IcarusVerilog cannot read these for some reason ^ `include "hd_primitives.v" `include "hd_functional.v" `include "{filename}" module tb_RAM{word_num}x{word_size}; localparam SIZE = `SIZE; localparam A_W = {addr_width}+$clog2(SIZE); localparam M_SZ = 2**A_W; reg CLK; reg [(SIZE-1):0] WE0; reg EN0; reg [(SIZE*8-1):0] Di0; wire [(SIZE*8-1):0] Do0; reg [A_W-1:0] A0, ADDR; reg [7:0] Phase; reg [7:0] RANDOM_BYTE; event done; RAM{word_num} #(.USE_LATCH(`USE_LATCH), .WSIZE(SIZE)) SRAM ( .CLK(CLK), .WE0(WE0), .EN0(EN0), .Di0(Di0), .Do(Do0), .A0(A0[A_W-1:$clog2(SIZE)]) ); initial begin $dumpfile("tb_RAM{word_num}x{word_size}.vcd"); $dumpvars(0, tb_RAM{word_num}x{word_size}); @(done) $finish; end /* Memory golden Model */ reg [(SIZE*8-1):0] RAM[(M_SZ)-1 : 0]; reg [(SIZE*8-1):0] RAM_DATA_RW; genvar c; generate for (c=0; c < SIZE; c = c+1) begin: mem_golden_model always @(posedge CLK) begin if(EN0) begin RAM_DATA_RW <= RAM[A0/SIZE]; if(WE0[c]) RAM[A0/SIZE][8*(c+1)-1:8*c] <= Di0[8*(c+1)-1:8*c]; end end end endgenerate """ begin_single_ported_test = """ initial begin CLK = 0; WE0 = 0; EN0 = 1; """ single_ported_custom_test = """ Phase = 0; // Perform a single word write then read mem_write_word({{SIZE{{8'h90}}}}, 4); mem_read_word_0(4); """ RAM_instantiation_1RW1R = """ /* An auto generated testbench to verify RAM{word_num}x{word_size} Authors: Mohamed Shalan ([email protected]) Ahmed Nofal ([email protected]) */ `define VERBOSE_1 `define VERBOSE_2 `define UNIT_DELAY #1 `define USE_LATCH 1 `define SIZE {word_size}/8 //`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/primitives.v" //`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/sky130_fd_sc_hd.v" // // Temporary override: IcarusVerilog cannot read these for some reason ^ `include "hd_primitives.v" `include "hd_functional.v" `include "{filename}" module tb_RAM{word_num}x{word_size}_1RW1R; localparam SIZE = `SIZE; localparam A_W = {addr_width}+$clog2(SIZE); localparam M_SZ = 2**A_W; reg CLK; reg [(SIZE-1):0] WE0; reg EN0; reg ENR; reg [(SIZE*8-1):0] Di0; wire [(SIZE*8-1):0] Do0; wire [(SIZE*8-1):0] Do1; reg [A_W-1:0] A0, A1, ADDR; reg [7:0] Phase; reg [7:0] RANDOM_BYTE; event done; RAM{word_num}_1RW1R #(.USE_LATCH(`USE_LATCH), .WSIZE(`SIZE)) SRAM ( .CLK(CLK), .WE0(WE0), .EN0(EN0), .EN1(ENR), .Di0(Di0), .Do0(Do0), .Do1(Do1), .A0(A0[A_W-1:$clog2(SIZE)]), 
.A1(A1[A_W-1:$clog2(SIZE)]) ); initial begin $dumpfile("tb_RAM{word_num}x{word_size}_1RW1R.vcd"); $dumpvars(0, tb_RAM{word_num}x{word_size}_1RW1R); @(done) $finish; end /* Memory golden Model */ reg [(SIZE*8-1):0] RAM[(M_SZ)-1 : 0]; reg [(SIZE*8-1):0] RAM_DATA_RW; reg [(SIZE*8-1):0] RAM_DATA_R; genvar c; generate for (c=0; c < SIZE; c = c+1) begin: mem_golden_model always @(posedge CLK) begin if(EN0) begin RAM_DATA_RW <= RAM[A0/SIZE]; if(WE0[c]) RAM[A0/SIZE][8*(c+1)-1:8*c] <= Di0[8*(c+1)-1:8*c]; end if (ENR) begin RAM_DATA_R <= RAM[A1/SIZE]; end end end endgenerate """ begin_dual_ported_test = """ initial begin CLK = 0; WE0 = 0; EN0 = 1; ENR = 1; """ dual_ported_custom_test = """ Phase = 0; // Perform a 2 word write then read 2 words mem_write_word({{SIZE{{8'h90}}}}, 4); mem_write_word({{SIZE{{8'h33}}}}, 8); mem_read_2words(4,8); """ start_test_common = """ always #10 CLK = !CLK; integer i; """ test_port_1RW1R = """ /*********************************************************** Write and read from different ports ************************************************************/ // Fill the memory with a known pattern // Word Write then Read Phase = 1; `ifdef VERBOSE_1 $display("\\nFinished Phase 0, starting Phase 1"); `endif for(i=0; i<M_SZ; i=i+SIZE) begin ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFC ; RANDOM_BYTE = $urandom; mem_write_word( {SIZE{RANDOM_BYTE}}, ADDR); mem_read_word_1( ADDR ); end // HWord Write then Read Phase = 2; `ifdef VERBOSE_1 $display("\\nFinished Phase 1, starting Phase 2"); `endif for(i=0; i<M_SZ; i=i+SIZE/2) begin ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFE; RANDOM_BYTE = $urandom; mem_write_hword( {SIZE/2{RANDOM_BYTE}}, ADDR); mem_read_word_1( ADDR & {{SIZE-1{8'hFF}}, 8'hFC} ); end // Byte Write then Read Phase = 3; `ifdef VERBOSE_1 $display("\\nFinished Phase 2, starting Phase 3"); `endif for(i=0; i<M_SZ; i=i+1) begin ADDR = (($urandom%M_SZ)); mem_write_byte($urandom%255, ADDR); mem_read_word_1(ADDR & {{SIZE-1{8'hFF}}, 8'hFC} ); end """ test_port_RW = """ /*********************************************************** Write and read from same port ************************************************************/ Phase = 4; `ifdef VERBOSE_1 $display("\\nFinished Phase 3, starting Phase 4"); `endif for(i=0; i<M_SZ; i=i+SIZE) begin ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFC ; RANDOM_BYTE = $urandom; mem_write_word( {SIZE{RANDOM_BYTE}}, ADDR); mem_read_word_0( ADDR ); end // HWord Write then Read Phase = 5; `ifdef VERBOSE_1 $display("\\nFinished Phase 4, starting Phase 5"); `endif for(i=0; i<M_SZ; i=i+SIZE/2) begin ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFE; RANDOM_BYTE = $urandom; mem_write_hword( {SIZE/2{RANDOM_BYTE}}, ADDR); mem_read_word_0( ADDR & {{SIZE-1{8'hFF}}, 8'hFC} ); end // Byte Write then Read Phase = 6; `ifdef VERBOSE_1 $display("\\nFinished Phase 5, starting Phase 6"); `endif for(i=0; i<M_SZ; i=i+1) begin ADDR = (($urandom%M_SZ)); mem_write_byte($urandom%255, ADDR); mem_read_word_0(ADDR & {{SIZE-1{8'hFF}}, 8'hFC} ); end $display ("\\n>> Test Passed! 
<<\\n"); -> done; """ end_test = """ end """ tasks = """ task mem_write_byte(input [7:0] byte, input [A_W-1:0] addr); begin @(posedge CLK); A0 = addr;//[A_WIDTH:2]; WE0 = (1 << addr[$clog2(SIZE)-1:0]); Di0 = (byte << (addr[$clog2(SIZE)-1:0] * 8)); @(posedge CLK); `ifdef VERBOSE_2 $display("WRITE BYTE: 0x%X to %0X(%0D) (0x%X, %B)", byte, addr, addr, Di0, WE0); `endif WE0 = {SIZE{8'h00}}; end endtask task mem_write_hword(input [SIZE*8-1:0] hword, input [A_W-1:0] addr); begin @(posedge CLK); A0 = addr;//[A_WIDTH:$clog2(SIZE)]; WE0 = {{SIZE/2{addr[$clog2(SIZE)-1]}},{SIZE/2{~addr[$clog2(SIZE)-1]}}}; Di0 = (hword << (addr[$clog2(SIZE)-1] * (SIZE/2)*8)); @(posedge CLK); `ifdef VERBOSE_2 $display("WRITE HWORD: 0x%X to %0X(%0D) (0x%X, %B)", hword, addr, addr, Di0, WE0); `endif WE0 = {SIZE{8'h00}}; end endtask task mem_write_word(input [SIZE*8-1:0] word, input [A_W-1:0] addr); begin @(posedge CLK); A0 = addr; WE0 = {SIZE{8'hFF}}; Di0 = word; @(posedge CLK); `ifdef VERBOSE_2 $display("WRITE WORD: 0x%X to %0X(%0D) (0x%X, %B)", word, addr, addr, Di0, WE0); `endif WE0 = {SIZE{8'h00}}; end endtask task mem_read_word_0(input [A_W-1:0] addr); begin @(posedge CLK); A0 = addr;//[9:2]; WE0 = {SIZE{8'h00}}; @(posedge CLK); #5; `ifdef VERBOSE_2 $display("READ WORD: 0x%X from %0D", Do0, addr); `endif check0(); end endtask task check0; begin if(RAM_DATA_RW !== Do0) begin $display("\\n>>Test Failed! <<\\t(Phase: %0d, Iteration: %0d", Phase, i); $display("Address: 0x%X, READ: 0x%X - Should be: 0x%X", A0, Do0, RAM[A0/SIZE]); $fatal(1); end end endtask """ dual_ported_tasks = """ task mem_read_2words(input [A_W-1:0] addr0, input [A_W-1:0] addr1); begin @(posedge CLK); A0= addr0;//[9:2]; A1= addr1;//[9:2]; WE0 = {SIZE{8'h00}}; @(posedge CLK); #5; `ifdef VERBOSE_2 $display("READ WORD0: 0x%X from %0D", Do0, addr0); $display("READ WORD1: 0x%X from %0D", Do1, addr1); `endif check0(); check1(); end endtask task mem_read_word_1(input [A_W-1:0] addr); begin @(posedge CLK); A1 = addr;//[9:2]; WE0 = {SIZE{8'h00}}; @(posedge CLK); #5; `ifdef VERBOSE_2 $display("READ WORD: 0x%X from %0D", Do1, addr); `endif check1(); end endtask task check1; begin if(RAM_DATA_R !== Do1) begin $display("\\n>>Test Failed! <<\\t(Phase: %0d, Iteration: %0d", Phase, i); $display("Address: 0x%X, READ: 0x%X - Should be: 0x%X", A1, Do1, RAM[A1/SIZE]); $fatal(1); end end endtask """ endmodule = """ endmodule """
27.108959
91
0.530368
137dda311b44a103b066cfeaf00c02a9bb814cbf
19,777
py
Python
xclim/indices/_anuclim.py
bzah/xclim
18ceee3f1db2d39355913c1c60ec32ddca6baccc
[ "Apache-2.0" ]
1
2022-02-03T13:46:58.000Z
2022-02-03T13:46:58.000Z
xclim/indices/_anuclim.py
raquel-ucl/xclim
6102e542e6e08072a60879d6200f9340207cd50e
[ "Apache-2.0" ]
2
2021-06-23T09:26:54.000Z
2021-07-26T19:28:41.000Z
xclim/indices/_anuclim.py
raquel-ucl/xclim
6102e542e6e08072a60879d6200f9340207cd50e
[ "Apache-2.0" ]
1
2021-03-02T20:12:28.000Z
2021-03-02T20:12:28.000Z
# noqa: D100
from typing import Optional

import numpy as np
import xarray

from xclim.core.units import (
    convert_units_to,
    declare_units,
    pint_multiply,
    rate2amount,
    units,
    units2pint,
)
from xclim.core.utils import ensure_chunk_size

from ._multivariate import (
    daily_temperature_range,
    extreme_temperature_range,
    precip_accumulation,
)
from ._simple import tg_mean
from .generic import select_resample_op
from .run_length import lazy_indexing

# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases

# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #

__all__ = [
    "temperature_seasonality",
    "precip_seasonality",
    "tg_mean_warmcold_quarter",
    "tg_mean_wetdry_quarter",
    "prcptot_wetdry_quarter",
    "prcptot_warmcold_quarter",
    "prcptot",
    "prcptot_wetdry_period",
    "isothermality",
]

_xr_argops = {
    "wettest": xarray.DataArray.argmax,
    "warmest": xarray.DataArray.argmax,
    "dryest": xarray.DataArray.argmin,
    "driest": xarray.DataArray.argmin,
    "coldest": xarray.DataArray.argmin,
}

_np_ops = {
    "wettest": "max",
    "warmest": "max",
    "dryest": "min",
    "driest": "min",
    "coldest": "min",
}


# FIXME: src_timestep is not used here.
def _anuclim_coeff_var(arr: xarray.DataArray) -> xarray.DataArray:
    """Calculate the annual coefficient of variation for ANUCLIM indices."""
    std = arr.resample(time="YS").std(dim="time")
    mu = arr.resample(time="YS").mean(dim="time")
    return std / mu


def _from_other_arg(
    criteria: xarray.DataArray, output: xarray.DataArray, op, freq: str
) -> xarray.DataArray:
    """Pick values from output based on operation returning an index from criteria.

    Parameters
    ----------
    criteria : DataArray
        Series on which operation returning index is applied.
    output : DataArray
        Series to be indexed.
    op : func
        Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
    freq : str
        Temporal grouping.

    Returns
    -------
    DataArray
        Output values where criteria is met at the given frequency.
    """
    ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
    dim = "time"

    return ds.resample(time=freq).map(get_other_op)


def _to_quarter(
    freq: str,
    pr: Optional[xarray.DataArray] = None,
    tas: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
    """Convert daily, weekly or monthly time series to quarterly time series according to ANUCLIM specifications."""
    if freq.upper().startswith("D"):
        if tas is not None:
            tas = tg_mean(tas, freq="7D")

        if pr is not None:
            # Accumulate on a week
            # Ensure units are back to a "rate" for rate2amount below
            pr = convert_units_to(precip_accumulation(pr, freq="7D"), "mm")
            pr.attrs["units"] = "mm/week"

        freq = "W"

    if freq.upper().startswith("W"):
        window = 13
    elif freq.upper().startswith("M"):
        window = 3
    else:
        raise NotImplementedError(
            f'Unknown input time frequency "{freq}": must be one of "D", "W" or "M".'
        )

    if tas is not None:
        tas = ensure_chunk_size(tas, time=np.ceil(window / 2))
    if pr is not None:
        pr = ensure_chunk_size(pr, time=np.ceil(window / 2))

    if pr is not None:
        pram = rate2amount(pr)
        out = pram.rolling(time=window, center=False).sum()
        out.attrs = pr.attrs
        out.attrs["units"] = pram.units

    if tas is not None:
        out = tas.rolling(time=window, center=False).mean(skipna=False)
        out.attrs = tas.attrs

    out = ensure_chunk_size(out, time=-1)
    return out
35.190391
128
0.674369
137eb4fb88a280d57e223383a1252ee199ec52e5
14,714
py
Python
bvs/background_verification/report/checks_status_report/checks_status_report.py
vhrspvl/vhrs-bvs
56667039d9cc09ad0b092e5e6c5dd6598ff41e7b
[ "MIT" ]
1
2021-08-19T11:16:47.000Z
2021-08-19T11:16:47.000Z
bvs/background_verification/report/checks_status_report/checks_status_report.py
vhrspvl/vhrs-bvs
56667039d9cc09ad0b092e5e6c5dd6598ff41e7b
[ "MIT" ]
null
null
null
bvs/background_verification/report/checks_status_report/checks_status_report.py
vhrspvl/vhrs-bvs
56667039d9cc09ad0b092e5e6c5dd6598ff41e7b
[ "MIT" ]
4
2018-03-21T05:57:54.000Z
2020-11-26T00:37:29.000Z
# Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt

from __future__ import unicode_literals

import frappe
from frappe import _, msgprint
from frappe.utils import (cint, cstr, date_diff, flt, getdate, money_in_words,
                          nowdate, rounded, today)
from datetime import datetime
from datetime import date
import datetime
from calendar import monthrange
41.68272
130
0.436387
137f6361d1e175bc555153af22f77e79ad507096
369
py
Python
dataset/dataset.py
TeamOfProfGuo/few_shot_baseline
f9ac87b9d309fc417589350d3ce61d3612e2be91
[ "MIT" ]
null
null
null
dataset/dataset.py
TeamOfProfGuo/few_shot_baseline
f9ac87b9d309fc417589350d3ce61d3612e2be91
[ "MIT" ]
null
null
null
dataset/dataset.py
TeamOfProfGuo/few_shot_baseline
f9ac87b9d309fc417589350d3ce61d3612e2be91
[ "MIT" ]
null
null
null
import os


DEFAULT_ROOT = './materials'

datasets_dt = {}
17.571429
62
0.642276
137fdb8af310be2f7cdaeb72968e537c0108415a
888
py
Python
src/proto_formatter/syntax_parser.py
YiXiaoCuoHuaiFenZi/proto-formatter
ac8c913a8c3854e840aa4f015c026e58ee023b0b
[ "MIT" ]
null
null
null
src/proto_formatter/syntax_parser.py
YiXiaoCuoHuaiFenZi/proto-formatter
ac8c913a8c3854e840aa4f015c026e58ee023b0b
[ "MIT" ]
null
null
null
src/proto_formatter/syntax_parser.py
YiXiaoCuoHuaiFenZi/proto-formatter
ac8c913a8c3854e840aa4f015c026e58ee023b0b
[ "MIT" ]
null
null
null
from .comment import CommentParser
from .protobuf import Protobuf
from .proto_structures import Syntax
29.6
77
0.657658
138136650386e60020d1b6906281d81dfdc20779
443
py
Python
IPL/app/core/views.py
mgp-git/Flask
f56be0192a3aac550a1dae46394352a68bd53d3d
[ "Apache-2.0" ]
null
null
null
IPL/app/core/views.py
mgp-git/Flask
f56be0192a3aac550a1dae46394352a68bd53d3d
[ "Apache-2.0" ]
null
null
null
IPL/app/core/views.py
mgp-git/Flask
f56be0192a3aac550a1dae46394352a68bd53d3d
[ "Apache-2.0" ]
null
null
null
from flask import render_template, request, Blueprint

core = Blueprint('core', __name__)
22.15
64
0.693002
13829c0823e4f1af5270d26df1460fb75ccc8a6b
47,884
py
Python
tests/test_s3.py
tdilauro/circulation-core
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
[ "Apache-2.0" ]
1
2021-11-16T00:58:43.000Z
2021-11-16T00:58:43.000Z
tests/test_s3.py
tdilauro/circulation-core
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
[ "Apache-2.0" ]
16
2021-05-17T19:24:47.000Z
2021-12-15T13:57:34.000Z
tests/test_s3.py
tdilauro/circulation-core
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
[ "Apache-2.0" ]
1
2021-05-12T19:11:52.000Z
2021-05-12T19:11:52.000Z
# encoding: utf-8 import functools import os from urllib.parse import urlsplit import boto3 import botocore import pytest from botocore.exceptions import BotoCoreError, ClientError from mock import MagicMock from parameterized import parameterized from ..mirror import MirrorUploader from ..model import ( DataSource, ExternalIntegration, Hyperlink, Identifier, Representation, create, ) from ..s3 import ( MinIOUploader, MinIOUploaderConfiguration, MockS3Client, MultipartS3Upload, S3AddressingStyle, S3Uploader, S3UploaderConfiguration, ) from ..testing import DatabaseTest from ..util.datetime_helpers import datetime_utc, utc_now def test_mirror_one(self): edition, pool = self._edition(with_license_pool=True) original_cover_location = "http://example.com/a-cover.png" content = open(self.sample_cover_path("test-book-cover.png"), "rb").read() cover, ignore = pool.add_link( Hyperlink.IMAGE, original_cover_location, edition.data_source, Representation.PNG_MEDIA_TYPE, content=content, ) cover_rep = cover.resource.representation assert None == cover_rep.mirrored_at original_epub_location = "https://books.com/a-book.epub" epub, ignore = pool.add_link( Hyperlink.OPEN_ACCESS_DOWNLOAD, original_epub_location, edition.data_source, Representation.EPUB_MEDIA_TYPE, content="i'm an epub", ) epub_rep = epub.resource.representation assert None == epub_rep.mirrored_at s3 = self._create_s3_uploader(client_class=MockS3Client) # Mock final_mirror_url so we can verify that it's called with # the right arguments s3.final_mirror_url = mock_final_mirror_url book_url = "http://books-go/here.epub" cover_url = "http://s3.amazonaws.com/covers-go/here.png" s3.mirror_one(cover.resource.representation, cover_url) s3.mirror_one(epub.resource.representation, book_url) [ [data1, bucket1, key1, args1, ignore1], [data2, bucket2, key2, args2, ignore2], ] = s3.client.uploads # Both representations have had .mirror_url set and been # mirrored to those URLs. assert data1.startswith(b"\x89") assert "covers-go" == bucket1 assert "here.png" == key1 assert Representation.PNG_MEDIA_TYPE == args1["ContentType"] assert (utc_now() - cover_rep.mirrored_at).seconds < 10 assert b"i'm an epub" == data2 assert "books-go" == bucket2 assert "here.epub" == key2 assert Representation.EPUB_MEDIA_TYPE == args2["ContentType"] # In both cases, mirror_url was set to the result of final_mirror_url. assert ( "final_mirror_url was called with bucket books-go, key here.epub" == epub_rep.mirror_url ) assert ( "final_mirror_url was called with bucket covers-go, key here.png" == cover_rep.mirror_url ) # mirrored-at was set when the representation was 'mirrored' for rep in epub_rep, cover_rep: assert (utc_now() - rep.mirrored_at).seconds < 10 assert False == MockMultipartS3Upload.completed assert True == MockMultipartS3Upload.aborted assert "Error!" == rep.mirror_exception rep.mirror_exception = None # Failed during completion with s3.multipart_upload( rep, rep.url, upload_class=AnotherFailingMultipartS3Upload ) as upload: upload.upload_part("Part 1") assert False == MockMultipartS3Upload.completed assert True == MockMultipartS3Upload.aborted assert "Error!" == rep.mirror_exception
35.105572
145
0.56497
1382e742de1eb49e756dcd17000e8fccc4bc6d6c
1,114
py
Python
lbry/scripts/set_build.py
vanshdevgan/lbry-sdk
3624a3b450945235edcf76971e18c898fba67455
[ "MIT" ]
null
null
null
lbry/scripts/set_build.py
vanshdevgan/lbry-sdk
3624a3b450945235edcf76971e18c898fba67455
[ "MIT" ]
null
null
null
lbry/scripts/set_build.py
vanshdevgan/lbry-sdk
3624a3b450945235edcf76971e18c898fba67455
[ "MIT" ]
null
null
null
"""Set the build version to be 'qa', 'rc', 'release'""" import sys import os import re import logging log = logging.getLogger() log.addHandler(logging.StreamHandler()) log.setLevel(logging.DEBUG) if __name__ == '__main__': sys.exit(main())
30.108108
82
0.658887
13836c2a14bcd63d7cbfba39a7fecc1ae843d691
7,087
py
Python
backend/jenkins/pipelines/ansible/utils/testplan_gen.py
gbl1124/hfrd
327d7c1e18704d2e31a2649b40ae1d90353ebe24
[ "Apache-2.0" ]
5
2019-08-02T20:53:57.000Z
2021-06-25T05:16:46.000Z
backend/jenkins/pipelines/ansible/utils/testplan_gen.py
anandbanik/hfrd
7bc1f13bfc9c7d902aec0363d27b089ef68c7eec
[ "Apache-2.0" ]
null
null
null
backend/jenkins/pipelines/ansible/utils/testplan_gen.py
anandbanik/hfrd
7bc1f13bfc9c7d902aec0363d27b089ef68c7eec
[ "Apache-2.0" ]
14
2019-07-01T01:40:50.000Z
2020-03-24T06:14:32.000Z
#!/usr/bin/python
import yaml
import os
import ast
import sys
from collections import OrderedDict

curr_dir = os.getcwd()
work_dir = sys.argv[1]
network_type = sys.argv[2]

testplan_dict = {}
testplan_dict["name"] = "System performance test"
testplan_dict["description"] = "This test is to create as much chaincode computation load as possible"
testplan_dict["runid"] = "RUNID_HERE"
if network_type == "ibp":
    testplan_dict["networkid"] = sys.argv[3]
testplan_dict["collectFabricMetrics"] = False
testplan_dict["storageclass"] = "default"
testplan_dict["saveLog"] = False
testplan_dict["continueAfterFail"] = True
testplan_dict["tests"] = []
testplan_dict["peernodeAlias"] =[]

if os.path.exists(work_dir) != True:
    print 'certs keyfiles directory do not exist'
    exit(1)

# Load template file
with open(curr_dir + "/templates/testplan_template.yml", 'r') as stream:
    template = yaml.load(stream)

channel_create = template["CHANNEL_CREATE"]
# channel_join = template["CHANNEL_JOIN"]
chaincode_install = template["CHAINCODE_INSTALL"]
chaincode_instantiate = template["CHAINCODE_INSTANTIATE"]
chaincode_invoke = template["CHAINCODE_INVOKE"]
execute_command = template["EXECUTE_COMMAND"]

connectionProfile = {}
org_list = []
org_list_lowercase = []
orderer_list = []
peer_list = []
org_peers_dict = {}
org_anchor_dict ={}
allAnchor_list =[]

# Load connection profile
for orgName in os.listdir(work_dir + '/keyfiles'):
    if os.path.isfile(work_dir + '/keyfiles/' + orgName + '/connection.yml'):
        with open(work_dir + '/keyfiles/' + orgName + '/connection.yml', 'r') as stream:
            connectionProfile = yaml.load(stream)
        if connectionProfile["orderers"] is None:
            continue
        orderer_list = orderer_list + connectionProfile["orderers"].keys()
        if (connectionProfile["organizations"][orgName.lower()]["peers"] != None):
            org_list.append(orgName)
            org_list_lowercase.append(orgName.lower())
            org_peers_dict[orgName] = connectionProfile["organizations"][orgName.lower(
            )]["peers"]
            peer_list = peer_list + \
                connectionProfile["organizations"][orgName.lower(
                )]["peers"]
            org_anchor_dict[orgName] = sorted(
                connectionProfile["organizations"][orgName.lower(
                )]["peers"])[0]

# When there is only peer or orderer, we skip tests.
if len(orderer_list) == 0 or len(peer_list) == 0:
    outputfile =open(work_dir + '/testplan_example.yml','w')
    outputfile.write("")
    outputfile.close()
    exit(0)

orderer_list = list(OrderedDict.fromkeys(orderer_list))
peer_list = list(OrderedDict.fromkeys(peer_list))

for orgName in org_list :
    tempOrgAnchorObj={}
    tempOrgAnchorObj[orgName+"Anchor"] = org_anchor_dict[orgName]
    testplan_dict["peernodeAlias"].append(tempOrgAnchorObj)
    tempOrgPeersObj={}
    tempOrgPeersObj[orgName+"Peers"] = ','.join(org_peers_dict[orgName])
    testplan_dict["peernodeAlias"].append(tempOrgPeersObj)
    allAnchor_list.append(org_anchor_dict[orgName])

testplan_dict["peernodeAlias"].append({"allAnchors":','.join(allAnchor_list)})
testplan_dict["peernodeAlias"].append({"allPeers":','.join(peer_list)})

print 'org list: '
print org_list_lowercase
print 'orderer_list: '
print orderer_list
print 'peer_list: '
print peer_list
print 'allAnchor_list'
print allAnchor_list

# CREATE_CHANNEL
channel_create["parameters"]["connectionProfile"] = org_list[0]
if network_type == 'cello':
    channel_create["parameters"]["channelConsortium"] = 'FabricConsortium'
else:
    channel_create["parameters"]["channelConsortium"] = 'SampleConsortium'
channel_create["parameters"]["channelOrgs"] = ','.join(org_list_lowercase)
channel_create["parameters"]["ordererName"] = orderer_list[0]
testplan_dict["tests"].append(channel_create)

# JOIN_CHANNEL and INSTALL_CHAINCODE
join_list = []
install_list = []
for org in org_list:
    channel_join = template["CHANNEL_JOIN"]
    channel_join["parameters"]["connectionProfile"] = org
    channel_join["parameters"]["peers"] = ','.join(org_peers_dict[org])
    channel_join["parameters"]["ordererName"] = orderer_list[0]
    join_list.append(str(channel_join))
    # CHAINCODE_INSTALL
    chaincode_install["parameters"]["connectionProfile"] = org
    chaincode_install["parameters"]["peers"] = ','.join(org_peers_dict[org])
    install_list.append(str(chaincode_install))

for join_org in join_list:
    join_item = ast.literal_eval(join_org)
    testplan_dict["tests"].append(join_item)

for install_org in install_list:
    install_item = ast.literal_eval(install_org)
    testplan_dict["tests"].append(install_item)

# CHAINCODE_INSTANTIATE
chaincode_instantiate["parameters"]["connectionProfile"] = org_list[0]
chaincode_instantiate["parameters"]["peers"] = ','.join(peer_list)

# CHAINCODE_INVOKE
# Invoke with fixed transaction count : 100
chaincode_invoke["iterationCount"] = '100'
chaincode_invoke["parameters"]["connectionProfile"] = org_list[0]
chaincode_invoke["parameters"]["peers"] = ','.join(peer_list)
chaincoode_invoke_count = str(chaincode_invoke)

# Invoke with fixed running duration : 0 hour 10 minutes 0 second.
# And enable running tests parallel by setting waitUntilFinish to true
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["waitUntilFinish"] = False
chaincoode_invoke_time = str(chaincode_invoke)

# Invoke with fixed running duration : 0 hour 10 minutes 0 second
chaincode_invoke["iterationCount"] = '0h10m0s'
chaincode_invoke["parameters"]["peers"] = peer_list[0]
chaincoode_invoke_parallel = str(chaincode_invoke)

testplan_dict["tests"].append(chaincode_instantiate)
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_count))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_time))
testplan_dict["tests"].append(ast.literal_eval(chaincoode_invoke_parallel))

# Execute command with default images
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))

# Execute command with customized image
execute_command["name"] = "execute-command-with-customized-image"
execute_command["container"] = "user/ownimage"
testplan_dict["tests"].append(ast.literal_eval(str(execute_command)))

connYamlStr= yaml.dump(testplan_dict,default_flow_style=False)
tempstr= connYamlStr
for orgName in org_list :
    tempstr = tempstr.replace(orgName+"Anchor:",orgName+"Anchor: &"+orgName+"Anchor")
    tempstr = tempstr.replace(orgName+"Peers:",orgName+"Peers: &"+orgName+"Peers")
tempstr = tempstr.replace("allAnchors:","allAnchors: &allAnchors")
tempstr = tempstr.replace("allPeers:","allPeers: &allPeers")
tempstr = tempstr.replace("runid:","runid: &runid")
if network_type == "ibp":
    tempstr = tempstr.replace("networkid:","networkid: &networkid")

# Dump testplan file
outputfile =open(work_dir + '/testplan_example.yml','w')
outputfile.write(tempstr)
outputfile.close()
39.372222
102
0.719769
1383ec6b114d686bf9cab5e588bcd0ec41143a37
1,033
py
Python
dblib/test_lib.py
cyber-fighters/dblib
9743122a55bc265f7551dd9283f381678b2703e4
[ "MIT" ]
null
null
null
dblib/test_lib.py
cyber-fighters/dblib
9743122a55bc265f7551dd9283f381678b2703e4
[ "MIT" ]
1
2019-02-25T09:52:31.000Z
2019-02-25T09:52:31.000Z
dblib/test_lib.py
cyber-fighters/dblib
9743122a55bc265f7551dd9283f381678b2703e4
[ "MIT" ]
null
null
null
"""Collection of tests.""" import pytest import dblib.lib f0 = dblib.lib.Finding('CD spook', 'my_PC', 'The CD drive is missing.') f1 = dblib.lib.Finding('Unplugged', 'my_PC', 'The power cord is unplugged.') f2 = dblib.lib.Finding('Monitor switched off', 'my_PC', 'The monitor is switched off.') def test_add_remove(): """Test function.""" db = dblib.lib.BackyardDB() # regular cases db.add(f0) assert f0 in db.findings assert len(db.findings) == 1 db.add(f1) assert f1 in db.findings assert len(db.findings) == 2 db.add(f2) assert f2 in db.findings assert len(db.findings) == 3 db.add(None) assert len(db.findings) == 3 db.remove(f1) assert f1 not in db.findings assert len(db.findings) == 2 # test exceptions with pytest.raises(TypeError): db.add(1) def test_update(): """Test function.""" db = dblib.lib.BackyardDB() db.add(f0) db.add(f1) db.update(f1, f2) assert f2 in db.findings assert len(db.findings) == 2
23.477273
87
0.629235
1383ee1f9bdf4c8acf135f0e8788f23793efa056
1,627
py
Python
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py
YuanyuanNi/azure-cli
63844964374858bfacd209bfe1b69eb456bd64ca
[ "MIT" ]
3,287
2016-07-26T17:34:33.000Z
2022-03-31T09:52:13.000Z
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py
YuanyuanNi/azure-cli
63844964374858bfacd209bfe1b69eb456bd64ca
[ "MIT" ]
19,206
2016-07-26T07:04:42.000Z
2022-03-31T23:57:09.000Z
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py
YuanyuanNi/azure-cli
63844964374858bfacd209bfe1b69eb456bd64ca
[ "MIT" ]
2,575
2016-07-26T06:44:40.000Z
2022-03-31T22:56:06.000Z
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from azure.cli.core.decorators import Completer
from azure.cli.core.commands.client_factory import get_subscription_id

from ._client_factory import cf_policy_insights
45.194444
112
0.696374
13842b8b9629fa8e84e1baad24f225b91291bacf
6,553
py
Python
hordak/migrations/0011_auto_20170225_2222.py
CodeBrew-LTD/django-hordak
efdfe503bf38b0a283790c5b4d27bd6bb28155e4
[ "MIT" ]
187
2016-12-12T10:58:11.000Z
2022-03-27T08:14:19.000Z
hordak/migrations/0011_auto_20170225_2222.py
CodeBrew-LTD/django-hordak
efdfe503bf38b0a283790c5b4d27bd6bb28155e4
[ "MIT" ]
62
2016-12-10T00:12:47.000Z
2022-03-16T09:23:05.000Z
hordak/migrations/0011_auto_20170225_2222.py
CodeBrew-LTD/django-hordak
efdfe503bf38b0a283790c5b4d27bd6bb28155e4
[ "MIT" ]
47
2016-12-12T11:07:31.000Z
2022-03-15T20:30:07.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-25 22:22
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_smalluuid.models
40.202454
100
0.327789
13844a293f37e75b2bbbb1208093a4013b133018
646
py
Python
Bot Telegram.py
devilnotcry77/devil_not_cry
a9d342d053c788ec6db2d1c5967ed55104b40045
[ "Apache-2.0" ]
null
null
null
Bot Telegram.py
devilnotcry77/devil_not_cry
a9d342d053c788ec6db2d1c5967ed55104b40045
[ "Apache-2.0" ]
null
null
null
Bot Telegram.py
devilnotcry77/devil_not_cry
a9d342d053c788ec6db2d1c5967ed55104b40045
[ "Apache-2.0" ]
null
null
null
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor

TOKEN = "Token for your bot"

bot = Bot(token=TOKEN)
dp = Dispatcher(bot)

if __name__ == '__main__':
    executor.start_polling(dp)
32.3
79
0.704334
13847a22eab74e6541bf593e5d68249f802e533f
1,398
py
Python
redactor/utils.py
danlgz/django-wysiwyg-redactor
755927ea2cb9db203c4a002b4da7ebfbf989dd64
[ "BSD-3-Clause" ]
null
null
null
redactor/utils.py
danlgz/django-wysiwyg-redactor
755927ea2cb9db203c4a002b4da7ebfbf989dd64
[ "BSD-3-Clause" ]
null
null
null
redactor/utils.py
danlgz/django-wysiwyg-redactor
755927ea2cb9db203c4a002b4da7ebfbf989dd64
[ "BSD-3-Clause" ]
2
2021-01-27T21:51:38.000Z
2021-03-10T22:31:19.000Z
from django.core.exceptions import ImproperlyConfigured
from importlib import import_module

try:
    from django.utils.encoding import force_text
except ImportError:
    from django.utils.encoding import force_unicode as force_text

from django.utils.functional import Promise
import json
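# --- Editor's sketch (hypothetical, not the original module body): a typical
# --- reason to import Promise, force_text, and json together is serializing
# --- Django lazy translation strings; the common pattern looks like this.
class LazyEncoder(json.JSONEncoder):
    def default(self, obj):
        # Resolve lazy translation proxies to plain text before encoding.
        if isinstance(obj, Promise):
            return force_text(obj)
        return super(LazyEncoder, self).default(obj)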
24.964286
71
0.670243
1385311ad77efabb909223d3edfa32108eab2458
4,984
py
Python
timedpid.py
DrGFreeman/PyTools
795e06b5a07f49a990df3c545d2d103b16dd8b4d
[ "MIT" ]
1
2020-04-20T04:45:47.000Z
2020-04-20T04:45:47.000Z
timedpid.py
DrGFreeman/PyTools
795e06b5a07f49a990df3c545d2d103b16dd8b4d
[ "MIT" ]
null
null
null
timedpid.py
DrGFreeman/PyTools
795e06b5a07f49a990df3c545d2d103b16dd8b4d
[ "MIT" ]
1
2020-04-20T04:45:51.000Z
2020-04-20T04:45:51.000Z
# timedpid.py
# Source: https://github.com/DrGFreeman/PyTools
#
# MIT License
#
# Copyright (c) 2017 Julien de la Bruere-Terreault <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This module defines a simple Proportional - Integral - Derivative (PID)
# controller with different time step calculation methods. This is a python
# implementation of my Arduino TimedPID library which can be found at
# https://github.com/DrGFreeman/TimedPID. Refer to this repository for detailed
# documentation.

import time
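# --- Editor's sketch (hypothetical, not the library's actual code): the core of
# --- a time-stepped PID update, to illustrate what the module description above
# --- refers to. Gain names (kp, ki, kd) are illustrative assumptions.
def pid_step(error, last_error, integral, dt, kp=1.0, ki=0.0, kd=0.0):
    """Return (command, new_integral) for one PID update over time step dt."""
    integral += error * dt                  # accumulate the integral term
    derivative = (error - last_error) / dt  # finite-difference derivative
    return kp * error + ki * integral + kd * derivative, integral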
36.647059
80
0.664125
13863d63148372da4df5a7856bdd98b8b8e90e54
3,051
py
Python
pmon/zmq_responder.py
bernd-clemenz/pmon
8b61de4864ffed2d7ee224c283090ed1948533ae
[ "MIT" ]
1
2020-06-01T19:20:09.000Z
2020-06-01T19:20:09.000Z
pmon/zmq_responder.py
bernd-clemenz/pmon
8b61de4864ffed2d7ee224c283090ed1948533ae
[ "MIT" ]
null
null
null
pmon/zmq_responder.py
bernd-clemenz/pmon
8b61de4864ffed2d7ee224c283090ed1948533ae
[ "MIT" ]
null
null
null
#
# -*- coding: utf-8-*-
# receives messages via zmq and executes some simple
# operations.
#
# (c) ISC Clemenz & Weinbrecht GmbH 2018
#

import json

import requests
import zmq

import pmon
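# --- Editor's sketch (hypothetical, not the original module body): the header
# --- and imports above suggest a REP-socket loop like this one, answering
# --- JSON-encoded requests. The bind address is an illustrative assumption.
def serve(bind_addr="tcp://*:5555"):
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(bind_addr)
    while True:
        request = socket.recv_json()  # block until a request arrives
        socket.send_json({"ok": True, "echo": request})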
29.336538
86
0.561455
138645ddd1d47197cec66a63b6f187e4f2176f57
423
py
Python
test/test_substitute.py
sanskrit/padmini
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
[ "MIT" ]
1
2022-03-01T05:05:04.000Z
2022-03-01T05:05:04.000Z
test/test_substitute.py
sanskrit/padmini
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
[ "MIT" ]
null
null
null
test/test_substitute.py
sanskrit/padmini
8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0
[ "MIT" ]
null
null
null
from padmini import operations as op

"""
def test_ti():
    assert S.ti("ta", "e") == "te"
    assert S.ti("AtAm", "e") == "Ate"


def test_antya():
    assert S.antya("ti", "u") == "tu"
    assert S.antya("te", "Am") == "tAm"
"""
19.227273
53
0.51773
1388c9a700adcd34c480eb91c770af0acaf65dde
2,186
py
Python
TVSaffiliations/extractemails_nogui.py
kmhambleton/LSST-TVSSC.github.io
2391fcdeddf83321825532aa7d7682b5dcf567f0
[ "CC-BY-3.0" ]
null
null
null
TVSaffiliations/extractemails_nogui.py
kmhambleton/LSST-TVSSC.github.io
2391fcdeddf83321825532aa7d7682b5dcf567f0
[ "CC-BY-3.0" ]
3
2018-06-15T10:12:39.000Z
2022-03-23T23:43:27.000Z
TVSaffiliations/extractemails_nogui.py
kmhambleton/LSST-TVSSC.github.io
2391fcdeddf83321825532aa7d7682b5dcf567f0
[ "CC-BY-3.0" ]
5
2018-03-27T12:53:55.000Z
2019-07-17T15:54:09.000Z
# coding: utf-8
# just prints the emails of members of a group to stdout,
# both primary and secondary members
# run as
# $python extractemails_nogui.py "Tidal Disruption Events"
from __future__ import print_function

__author__ = 'Federica Bianco, NYU - GitHub: fedhere'

import sys
import pandas as pd
from argparse import ArgumentParser
from config import tvsfile


def parse_args(subglist):
    """Use ArgParser to build up the arguments we will use in our script."""
    stored_args = {}
    # get the script name without the extension & use it to build up
    # the json filename
    parser = ArgumentParser(description='Selecting members by subgroup')
    parser.add_argument('subgroup',
                        action='store',
                        default=None,
                        help='Choose the subgroup affiliation:' +
                             ' -- '.join([s for s in subglist]))
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    if tvsfile is None:
        print("Required Argument: Google Doc file identifier (if you do not have it email federica!)")
        sys.exit()
    TVSMembers = pd.read_csv('https://docs.google.com/spreadsheets/d/' +
                             tvsfile + '/export?gid=0&format=csv',
                             index_col=0)
    subgroups = TVSMembers.primary.unique()
    conf = parse_args([x for x in subgroups if str(x) != 'nan'])
    primary = conf.subgroup
    secondary = conf.subgroup
    emails = TVSMembers[TVSMembers.primary == primary]['email'].values
    print("These are the members with primary affiliation with " + primary)
    print("")
    print(' '.join([em + ',' for em in emails]))
    emails = TVSMembers[(TVSMembers.secondary == secondary) |
                        (TVSMembers['secondary.1'] == secondary) |
                        (TVSMembers['secondary.2'] == secondary)]['email'].values
    print("\n")
    print("These are the members with secondary affiliation with " + secondary)
    print("")
    print(' '.join([em + ',' for em in emails]))
    print("")
    print("If you also want their names and affiliations use: ")
    print("$python extractemailsW.py " + conf.subgroup)
35.836066
162
0.627173
1388cb066414d3af45386f8ba3a988639cd4786c
20,373
py
Python
cogs/owner.py
Obsidian-Development/JDBot
315b0782126ac36fe934ac3ba2d7132710d58651
[ "MIT" ]
null
null
null
cogs/owner.py
Obsidian-Development/JDBot
315b0782126ac36fe934ac3ba2d7132710d58651
[ "MIT" ]
1
2021-11-09T14:30:49.000Z
2021-11-09T14:31:19.000Z
cogs/owner.py
Obsidian-Development/JDBot
315b0782126ac36fe934ac3ba2d7132710d58651
[ "MIT" ]
null
null
null
from discord.ext import commands, menus
import utils
import random, discord, os, importlib, mystbin, typing, aioimgur, functools, tweepy
import traceback, textwrap
from discord.ext.menus.views import ViewMenuPages


class SusUsersEmbed(menus.ListPageSource):
    pass  # class body not included in this excerpt


class TestersEmbed(menus.ListPageSource):
    pass  # class body not included in this excerpt


def tweepy_post(self, post_text=None):
    # Reads Twitter credentials from environment variables and posts a status.
    # The `self` parameter suggests this was excerpted from a class body.
    consumer_key = os.getenv('tweet_key')
    consumer_secret = os.getenv('tweet_secret')
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    access_token = os.getenv('tweet_access')
    access_secret = os.getenv('tweet_token')
    auth.set_access_token(access_token, access_secret)
    twitter_api = tweepy.API(auth)
    return twitter_api.update_status(status=post_text)
38.223265
333
0.676827
138922f3a893ab484911754fbdc916b94b521606
1,341
py
Python
tests/input_files/full_sm_UFO/function_library.py
valassi/mg5amc_test
2e04f23353051f64e1604b23105fe3faabd32869
[ "NCSA" ]
1
2016-07-09T00:05:56.000Z
2016-07-09T00:05:56.000Z
tests/input_files/full_sm_UFO/function_library.py
valassi/mg5amc_test
2e04f23353051f64e1604b23105fe3faabd32869
[ "NCSA" ]
4
2022-03-10T09:13:31.000Z
2022-03-30T16:15:01.000Z
tests/input_files/full_sm_UFO/function_library.py
valassi/mg5amc_test
2e04f23353051f64e1604b23105fe3faabd32869
[ "NCSA" ]
1
2016-07-09T00:06:15.000Z
2016-07-09T00:06:15.000Z
# This file is part of the UFO.
#
# This file contains definitions for functions that
# are extensions of the cmath library, and correspond
# either to functions that are in cmath, but inconvenient
# to access from there (e.g. z.conjugate()),
# or functions that are simply not defined.
#
#
from __future__ import absolute_import

__date__ = "22 July 2010"
__author__ = "[email protected]"

import cmath
from .object_library import all_functions, Function

#
# shortcuts for functions from cmath
#

complexconjugate = Function(name = 'complexconjugate',
                            arguments = ('z',),
                            expression = 'z.conjugate()')

re = Function(name = 're',
              arguments = ('z',),
              expression = 'z.real')

im = Function(name = 'im',
              arguments = ('z',),
              expression = 'z.imag')

# New functions (trigonometric)

sec = Function(name = 'sec',
               arguments = ('z',),
               expression = '1./cmath.cos(z)')

asec = Function(name = 'asec',
                arguments = ('z',),
                expression = 'cmath.acos(1./z)')

csc = Function(name = 'csc',
               arguments = ('z',),
               expression = '1./cmath.sin(z)')

acsc = Function(name = 'acsc',
                arguments = ('z',),
                expression = 'cmath.asin(1./z)')
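# --- Editor's sketch (illustrative, not part of the UFO file): evaluating one of
# --- these Function objects amounts to binding its argument and eval-ing the
# --- stored expression string (assuming, as in standard UFO object_library code,
# --- that the keyword arguments are stored as attributes).
assert abs(eval(sec.expression, {'cmath': cmath, 'z': 0.5}) - 1. / cmath.cos(0.5)) < 1e-12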
23.946429
57
0.57047