Dataset schema (one row per source file; nullable columns are marked). The rows below follow this column order, with `content` holding the file text and the three trailing numeric columns closing each row.

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
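As a minimal sketch of reading such a corpus programmatically (assuming it is published as a Hugging Face dataset with the columns above; the dataset id below is a placeholder):

```python
from itertools import islice
from datasets import load_dataset

# "user/python-files" is a hypothetical dataset id with the schema above.
# Streaming avoids downloading the multi-gigabyte corpus eagerly.
ds = load_dataset("user/python-files", split="train", streaming=True)

for row in islice(ds, 3):
    # Nullable columns (e.g. max_stars_count) may come back as None.
    print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
```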
0aec7fad0f474867079a857e5fa0aa0966e20a00 | 2,472 | py | Python | upload_from_folder.py | robinrobinzon/fastpic | 966f1aa8c6d7e98651727e7ed7f6b25970d5da11 | ["MIT"] | null | null | null | upload_from_folder.py | robinrobinzon/fastpic | 966f1aa8c6d7e98651727e7ed7f6b25970d5da11 | ["MIT"] | null | null | null | upload_from_folder.py | robinrobinzon/fastpic | 966f1aa8c6d7e98651727e7ed7f6b25970d5da11 | ["MIT"] | null | null | null |
import datetime
import os
import shutil
import tempfile
from joblib import Parallel, delayed
from fastpic_upload import upload_file_to_fastpic
_n_jobs_for_upload = 20
_root_folders_set = (
'/path/to/folder',
)
_spoiler_for_each_file = True
if __name__ == '__main__':
started = datetime.datetime.now()
print(started, 'started')
main()
finished = datetime.datetime.now()
print(finished, 'all done in', finished - started)
| 29.783133 | 106 | 0.651294 |
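The row above is truncated: `main()` is called but its definition is missing from the extract. A minimal sketch of a compatible `main()`, using only the names visible above; the signature of `upload_file_to_fastpic` is an assumption:

```python
def main():
    # Hypothetical reconstruction: walk each root folder and upload files in parallel.
    for folder in _root_folders_set:
        pic_paths = [os.path.join(root, name)
                     for root, _, names in os.walk(folder)
                     for name in names]
        # Assumed signature: upload_file_to_fastpic(path) -> upload result
        results = Parallel(n_jobs=_n_jobs_for_upload)(
            delayed(upload_file_to_fastpic)(path) for path in pic_paths)
        for path, result in zip(pic_paths, results):
            print(path, result)
```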
0aecc3617c0fed4d5c58d568836e4b90d9b9886f | 1,994 | py | Python | tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py | TolyaTalamanov/open_model_zoo | 1697e60712df4ca72635a2080a197b9d3bc24129 | ["Apache-2.0"] | 2,201 | 2018-10-15T14:37:19.000Z | 2020-07-16T02:05:51.000Z | tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | ["Apache-2.0"] | 759 | 2018-10-18T07:43:55.000Z | 2020-07-16T01:23:12.000Z | tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/clip_segmentation_mask.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | ["Apache-2.0"] | 808 | 2018-10-16T14:03:49.000Z | 2020-07-15T11:41:45.000Z |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .postprocessor import PostprocessorWithSpecificTargets
from ..representation import BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction
from ..config import NumberField, ConfigError
| 38.346154 | 117 | 0.739218 |
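Only the license header and imports of this row survived; the postprocessor class body is missing. A standalone sketch of the core clipping step such a postprocessor performs (the real `accuracy_checker` class wiring and config keys are not shown here, so treat this as illustrative only):

```python
def clip_segmentation_mask(mask, min_value, max_value):
    """Clamp segmentation mask labels into [min_value, max_value] (illustrative helper)."""
    if min_value > max_value:
        raise ConfigError('min_value should be less than max_value')
    return np.clip(mask, min_value, max_value)
```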
0aee1a078e80effb05eed8b8321db099a4b35623 | 1,925 | py | Python | tests/test_utils.py | isabella232/pynacl | b3f6c320569d858ba61d4bdf2ac788564528c1c9 | ["Apache-2.0"] | 756 | 2015-01-03T17:49:44.000Z | 2022-03-31T13:54:33.000Z | tests/test_utils.py | isabella232/pynacl | b3f6c320569d858ba61d4bdf2ac788564528c1c9 | ["Apache-2.0"] | 540 | 2015-01-02T10:54:33.000Z | 2022-03-05T18:47:01.000Z | tests/test_utils.py | isabella232/pynacl | b3f6c320569d858ba61d4bdf2ac788564528c1c9 | ["Apache-2.0"] | 217 | 2015-01-09T00:48:01.000Z | 2022-03-26T08:53:32.000Z |
# Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nacl.secret
import nacl.utils
| 32.083333 | 75 | 0.725195 |
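Only the imports of this test module remain. For illustration, minimal tests in the same style (these are not the repo's actual tests; `nacl.utils.random(size)` is a real PyNaCl API returning `size` random bytes):

```python
def test_random_bytes_length():
    assert len(nacl.utils.random(16)) == 16


def test_random_bytes_distinct():
    # Two 16-byte draws colliding is astronomically unlikely.
    assert nacl.utils.random(16) != nacl.utils.random(16)
```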
0aefad001e36b9eae9b3eb392972175239563b8d | 2,893 | py | Python | guesstheword.py | Cha0sNation/RandomPython | 7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec | ["MIT"] | null | null | null | guesstheword.py | Cha0sNation/RandomPython | 7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec | ["MIT"] | null | null | null | guesstheword.py | Cha0sNation/RandomPython | 7ba41d78f27bd90e9c09efcd4d5c26eac93e74ec | ["MIT"] | null | null | null |
#! /home/cha0snation/anaconda3/bin/python
import random
if __name__ == "__main__":
words, output, word, tries, playing = setup()
while playing:
print("Try to guess the word:")
if tries == 1:
print("You have {0} try left.".format(tries))
else:
print("You have {0} tries left.".format(tries))
# print("DEBUG: word is {0}".format(word))
if output == []:
for i in word:
output.append("_")
for i in range(len(output)):
print("_ ", end="")
else:
print_output(output)
print()
print()
try:
while True:
guess = str(input("Guess: "))
if len(guess) == 1:
break
except (EOFError, KeyboardInterrupt):
print()
break
except ValueError:
print("Invalid guess")
break
print()
guess = check_same(guess, output)
tries = check_letter(guess, word, tries)
if check_finished(output, tries):
choice = input("Do you want to play again ? (y or n): ")
print()
if choice.lower().startswith("y"):
words, output, word, tries, playing = setup()
else:
playing = False
| 24.726496 | 73 | 0.483927 |
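The game loop above calls `setup`, `print_output`, `check_same`, `check_letter`, and `check_finished`, none of which survive in this extract. Hedged sketches with signatures matching the surviving call sites; the word list, tries count, and messages are invented, and the call sites imply `check_letter` updates the module-level `output` in place:

```python
def setup():
    words = ["python", "keyboard", "monitor"]  # placeholder word list
    word = random.choice(words)
    return words, [], word, 6, True


def print_output(output):
    for letter in output:
        print(letter, end=" ")


def check_same(guess, output):
    if guess in output:
        print('You already guessed "{0}"!'.format(guess))
    return guess


def check_letter(guess, word, tries):
    # `output` is the module-level list of revealed letters; the call site
    # above does not pass it in, so the original presumably used it globally.
    if guess in word:
        for i, letter in enumerate(word):
            if letter == guess:
                output[i] = guess
        return tries
    return tries - 1


def check_finished(output, tries):
    if tries == 0:
        print("Out of tries!")
        return True
    if output and "_" not in output:
        print("You guessed the word!")
        return True
    return False
```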
0af0f43e75ad092a7a05698be61aa6dca9c4178e | 2,131 | py | Python | web_app/index.py | svakulenk0/ArtDATIS | 29e646f7bcb931e733ee248cc973411ffb18be64 | ["MIT"] | null | null | null | web_app/index.py | svakulenk0/ArtDATIS | 29e646f7bcb931e733ee248cc973411ffb18be64 | ["MIT"] | 9 | 2020-03-24T17:57:03.000Z | 2022-03-12T00:08:07.000Z | web_app/index.py | svakulenk0/ArtDATIS | 29e646f7bcb931e733ee248cc973411ffb18be64 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Dec 8, 2019
.. codeauthor: svitlana vakulenko
<[email protected]>
Index docs into ES
https://qbox.io/blog/building-an-elasticsearch-index-with-python
'''
from settings import *
import glob
import re
# n first characters for the doc preview
LIMIT_START = 100
txts_path = '%s/artdatis/tagging/OCRed/typed/' % DATA_PATH
text_corpus = []
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
if es.indices.exists(INDEX_NAME):
print("deleting '%s' index..." % (INDEX_NAME))
res = es.indices.delete(index = INDEX_NAME)
print(" response: '%s'" % (res))
request_body = {
"settings" : {
"number_of_shards": 1,
"number_of_replicas": 0
}
}
print("creating '%s' index..." % (INDEX_NAME))
res = es.indices.create(index = INDEX_NAME, body = request_body)
print(" response: '%s'" % (res))
# bulk index the data
print("bulk indexing...")
bulk(es, corpus_iterator())
# sanity check
res = es.search(index = INDEX_NAME, size=2, body={"query": {"match_all": {}}})
print("results:")
for hit in res['hits']['hits']:
print(hit["_source"])
| 30.014085 | 99 | 0.603003 |
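`bulk(es, corpus_iterator())` above references a generator that is missing from the extract. A sketch under the visible assumptions (plain-text files under `txts_path`, indexed with a short preview; the field names are invented):

```python
def corpus_iterator():
    # Hypothetical generator of bulk-index actions for the OCRed .txt files.
    for path in glob.glob(txts_path + '*.txt'):
        with open(path) as f:
            text = f.read()
        yield {
            "_index": INDEX_NAME,
            "_source": {
                "path": path,
                "preview": text[:LIMIT_START],
                "text": text,
            },
        }
```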
0af106828dec53475f13db7b60f12e654896ac46 | 277 | py | Python | src/tokens.py | PythonIsMagic/ponyup | 3b2630d573cd46d0569f713c6d4c3790688dc62d | ["MIT"] | 1 | 2022-03-22T12:41:35.000Z | 2022-03-22T12:41:35.000Z | src/tokens.py | PythonIsMagic/ponyup | 3b2630d573cd46d0569f713c6d4c3790688dc62d | ["MIT"] | null | null | null | src/tokens.py | PythonIsMagic/ponyup | 3b2630d573cd46d0569f713c6d4c3790688dc62d | ["MIT"] | 1 | 2022-03-22T12:41:37.000Z | 2022-03-22T12:41:35.000Z |
"""
A Token is a button or other object on the table that represents a position, a game state, layer state, or some other piece of info
"""
| 25.181818 | 131 | 0.65343 |
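The docstring is all that remains of this module. A minimal sketch of a `Token` matching the description; the fields are guesses:

```python
class Token:
    """Marker object on the table (e.g. a dealer button)."""

    def __init__(self, name, seat=None):
        self.name = name   # e.g. "D" for the dealer button
        self.seat = seat   # table position the token currently sits at

    def __repr__(self):
        return "Token({}, seat={})".format(self.name, self.seat)
```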
0af1145915916f93873c49da300235d391c3c012 | 95 | py | Python | T05-09/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | ["MIT"] | 2 | 2019-11-20T19:26:07.000Z | 2019-11-22T00:42:23.000Z | T05-09/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | ["MIT"] | 2 | 2019-11-28T05:21:24.000Z | 2019-11-28T05:21:58.000Z | T05-09/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | ["MIT"] | 25 | 2019-11-27T01:40:56.000Z | 2019-12-04T23:38:59.000Z |
nis=get('nis')
q1="xpto1"
q2=nis + "xpto2"
query=query1.q2
koneksi=0
q=execute(query,koneksi)
| 11.875 | 24 | 0.705263 |
0af1366c588c694d1d5fccc2c589b64a4b89883f | 1,089 | py | Python | Chapter09/interpolation_search.py | Xiangs18/Algorithms-with-Python-Second-Edition | 96844e1ae7054e099772dc691c1f41f15c2bfba5 | ["MIT"] | null | null | null | Chapter09/interpolation_search.py | Xiangs18/Algorithms-with-Python-Second-Edition | 96844e1ae7054e099772dc691c1f41f15c2bfba5 | ["MIT"] | null | null | null | Chapter09/interpolation_search.py | Xiangs18/Algorithms-with-Python-Second-Edition | 96844e1ae7054e099772dc691c1f41f15c2bfba5 | ["MIT"] | null | null | null |
store = [2, 4, 5, 12, 43, 54, 60, 77]
a = interpolation_search(store, 2)
print("Index position of value 2 is ", a)
| 37.551724 | 83 | 0.693297 |
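The driver code above exercises `interpolation_search`, whose definition was dropped from this row. The textbook algorithm is sketched below; this is the standard formulation, not necessarily the book's exact code:

```python
def interpolation_search(data, key):
    low, high = 0, len(data) - 1
    while low <= high and data[low] <= key <= data[high]:
        if data[low] == data[high]:
            return low if data[low] == key else -1
        # Estimate the probe position from the key's relative magnitude.
        mid = low + (key - data[low]) * (high - low) // (data[high] - data[low])
        if data[mid] == key:
            return mid
        if data[mid] < key:
            low = mid + 1
        else:
            high = mid - 1
    return -1
```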
0af19b677c50c3526ce7825f2f9c6b76ac47738c | 715 | py | Python | projects/models.py | javixeneize/asvs-1 | 31e9fdfd2d538c8ed1adf23fcb4f143ef28541c6 | ["MIT"] | 1 | 2020-10-01T05:55:39.000Z | 2020-10-01T05:55:39.000Z | projects/models.py | Tasha-Carty-220/asvs | 634cc0e96daedc91d1acc06827ce82e9c13f520d | ["MIT"] | null | null | null | projects/models.py | Tasha-Carty-220/asvs | 634cc0e96daedc91d1acc06827ce82e9c13f520d | ["MIT"] | null | null | null |
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.urls import reverse
| 28.6 | 65 | 0.738462 |
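Only the imports remain of this `models.py`. An illustrative model using exactly those imports; the field names and URL name are invented, not the app's real schema:

```python
class Project(models.Model):
    # Hypothetical model; the real app's fields are not in the extract.
    name = models.CharField(max_length=100)
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    public = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)

    def get_absolute_url(self):
        return reverse("project-detail", kwargs={"pk": self.pk})

    @staticmethod
    def visible_to(user):
        # Example use of Q: projects the user owns or that are public.
        return Project.objects.filter(Q(owner=user) | Q(public=True))
```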
0af1a3c68967c05606abe6a22eb2bbc2a17f6f6f | 1,164 | py | Python | tests/serverless/checks/aws/test_AdminPolicyDocument.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | ["Apache-2.0"] | null | null | null | tests/serverless/checks/aws/test_AdminPolicyDocument.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | ["Apache-2.0"] | null | null | null | tests/serverless/checks/aws/test_AdminPolicyDocument.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | ["Apache-2.0"] | null | null | null |
import os
import unittest
from checkov.serverless.checks.function.aws.AdminPolicyDocument import check
from checkov.serverless.runner import Runner
from checkov.runner_filter import RunnerFilter
if __name__ == '__main__':
unittest.main()
| 36.375 | 102 | 0.668385 |
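The test class itself is missing from this row. A sketch of the usual shape of a checkov check test; the resources directory name and assertions are guesses:

```python
class TestAdminPolicyDocument(unittest.TestCase):
    def test_check(self):
        runner = Runner()
        current_dir = os.path.dirname(os.path.realpath(__file__))
        # Hypothetical directory of serverless fixtures for this check.
        test_files_dir = os.path.join(current_dir, "resources", "AdminPolicyDocument")
        report = runner.run(root_folder=test_files_dir,
                            runner_filter=RunnerFilter(checks=[check.id]))
        summary = report.get_summary()
        self.assertGreater(summary["failed"] + summary["passed"], 0)
```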
0af230c3ec87bec2b40fe4cc74ba6765304b22f0 | 13,752 | py | Python | src/macro_pack.py | lulinsheng/macro_pack | 4e9d0178354bad2aa557298f44ba5d4385a72a2b | ["Apache-2.0"] | null | null | null | src/macro_pack.py | lulinsheng/macro_pack | 4e9d0178354bad2aa557298f44ba5d4385a72a2b | ["Apache-2.0"] | null | null | null | src/macro_pack.py | lulinsheng/macro_pack | 4e9d0178354bad2aa557298f44ba5d4385a72a2b | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python3
# encoding: utf-8
import os
import sys
import getopt
import logging
import shutil
import psutil
from modules.com_run import ComGenerator
from modules.web_server import ListenServer
from modules.Wlisten_server import WListenServer
from modules.payload_builder_factory import PayloadBuilderFactory
from common import utils, mp_session, help
from common.utils import MSTypes
from common.definitions import VERSION, LOGLEVEL
if sys.platform == "win32":
try:
import win32com.client #@UnresolvedImport @UnusedImport
except:
print("Error: Could not find win32com.")
sys.exit(1)
MP_TYPE="Pro"
if utils.checkModuleExist("pro_core"):
from pro_modules.utilities.dcom_run import DcomGenerator
from pro_modules.payload_builders.containers import ContainerGenerator
from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro
from pro_core import arg_mgt_pro, mp_session_pro
else:
MP_TYPE="Community"
from colorama import init
from termcolor import colored
# {PyArmor Protection Code}
# {PyArmor Plugins}
# use Colorama to make Termcolor work on Windows too
init()
WORKING_DIR = "temp"
BANNER = help.getToolPres()
if __name__ == '__main__':
# check if running from explorer, if yes restart from cmd line
# running_from = psutil.Process(os.getpid()).parent().parent().name()
# if running_from == 'explorer.exe':
# os.system("cmd.exe /k \"%s\"" % utils.getRunningApp())
# PyArmor Plugin: checkPlug()
main(sys.argv[1:])
| 40.210526 | 171 | 0.592568 |
0af2fa6e42815eb039756485d8f3d3cde750d905 | 137 | py | Python | faced/const.py | binhmuc/faced | cbc18f552da9c53628d61d56de7dfda451a6e25f | ["MIT"] | null | null | null | faced/const.py | binhmuc/faced | cbc18f552da9c53628d61d56de7dfda451a6e25f | ["MIT"] | null | null | null | faced/const.py | binhmuc/faced | cbc18f552da9c53628d61d56de7dfda451a6e25f | ["MIT"] | null | null | null |
import os
MODELS_PATH = os.path.join(os.path.dirname(__file__), "models")
YOLO_SIZE = 288
YOLO_TARGET = 9
CORRECTOR_SIZE = 50
| 15.222222 | 64 | 0.70073 |
0af340336c716992b681bade66c39e840439919b | 6,148 | py | Python | etl/load/elasticsearch.py | bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis | 973dc444eac6d1cc80c020dd8b9a4656f70eeafb | ["BSD-3-Clause"] | 3 | 2018-06-04T09:14:55.000Z | 2018-10-25T14:32:03.000Z | etl/load/elasticsearch.py | bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis | 973dc444eac6d1cc80c020dd8b9a4656f70eeafb | ["BSD-3-Clause"] | 18 | 2020-06-04T07:08:17.000Z | 2022-02-02T17:02:17.000Z | etl/load/elasticsearch.py | bilalelhoudaigui/plant-brapi-etl-data-lookup-gnpis | 973dc444eac6d1cc80c020dd8b9a4656f70eeafb | ["BSD-3-Clause"] | 4 | 2019-04-18T12:53:19.000Z | 2019-11-22T08:53:19.000Z |
# Load json bulk files into elasticsearch
import json
import os
import time
import traceback
import elasticsearch
from etl.common.store import list_entity_files
from etl.common.utils import get_folder_path, get_file_path, create_logger, first, replace_template
# Init Elasticsearch and test connection
def init_es_client(url, logger):
es_client = elasticsearch.Elasticsearch([url])
try:
info = es_client.info()
logger.debug('Connected to node "{}" of cluster "{}" on "{}"'.format(info['name'], info['cluster_name'], url))
except elasticsearch.exceptions.ConnectionError as e:
logger.error('Connection error: Elasticsearch unavailable on "{}".\nPlease check your configuration'.format(url))
raise e
return es_client
def check_error(response):
if response.get('errors'):
raise ElasticSearchException(response)
def load_source(source, config, source_bulk_dir, log_dir):
"""
Full Elasticsearch documents indexing
"""
source_name = source['schema:identifier']
action = 'load-elasticsearch-' + source_name
log_file = get_file_path([log_dir, action], ext='.log', recreate=True)
logger = create_logger(source_name, log_file, config['options']['verbose'])
load_config = config['load-elasticsearch']
es_client = init_es_client(load_config['url'], logger)
logger.info("Loading '{}' into elasticsearch '{}'...".format(source_bulk_dir, load_config['url']))
try:
if not os.path.exists(source_bulk_dir):
raise FileNotFoundError(
'No such file or directory: \'{}\'.\n'
'Please make sure you have run the BrAPI extraction and Elasticsearch document transformation'
' before trying to launch the transformation process.'
.format(source_bulk_dir))
bulk_files = list(list_entity_files(source_bulk_dir))
all_document_types = set(map(first, bulk_files))
document_types = load_config.get('document-types') or all_document_types
document_types = document_types.intersection(all_document_types)
index_by_document = dict()
logger.info("Preparing index with template mapping...")
timestamp = int(time.time())
for document_type in document_types:
base_index_name = replace_template(
load_config['index-template'],
{'source': source['schema:identifier'], 'documentType': document_type}
).lower()
create_template(es_client, load_config, document_type, base_index_name, logger)
index_name = base_index_name + '-d' + str(timestamp)
create_index(es_client, index_name, logger)
index_by_document[document_type] = base_index_name, index_name
logger.info("Bulk indexing...")
for document_type, file_path in bulk_files:
if document_type in index_by_document:
base_index_name, index_name = index_by_document[document_type]
bulk_index(es_client, index_name, file_path, logger)
logger.info("Creating index aliases and deleting old indices...")
for document_type, (base_index_name, index_name) in index_by_document.items():
create_alias(es_client, index_name, base_index_name, logger)
new_index, *old_indices = get_indices(es_client, base_index_name)
for old_index in old_indices[1:]:
delete_index(es_client, old_index, logger)
logger.info("SUCCEEDED Loading {}.".format(source_name))
except Exception as e:
logger.debug(traceback.format_exc())
logger.debug(getattr(e, 'long_message', ''))
logger.info("FAILED Loading {} Elasticsearch documents.\n"
"=> Check the logs ({}) for more details."
.format(source_name, log_file))
| 40.183007 | 121 | 0.689655 |
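`load_source` above depends on helpers (`create_template`, `create_index`, `bulk_index`, `create_alias`, `get_indices`, `delete_index`, `ElasticSearchException`) that did not survive the extract. Hedged sketches of two of them, using standard `elasticsearch-py` calls and assuming each bulk file already contains newline-delimited bulk actions:

```python
def create_index(es_client, index_name, logger):
    logger.debug('Creating index "%s"', index_name)
    es_client.indices.create(index=index_name)


def bulk_index(es_client, index_name, file_path, logger):
    logger.debug('Bulk indexing "%s" into "%s"', file_path, index_name)
    with open(file_path) as bulk_file:
        response = es_client.bulk(index=index_name, body=bulk_file.read())
    check_error(response)
```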
0af3b89835e63f3225a17831847f039cebf091f8 | 6,798 | py | Python | geoplot/crs.py | redfrexx/geoplot | 8231baab0e286f1dec870dd5e8c6c8218e5b5da7 | ["MIT"] | null | null | null | geoplot/crs.py | redfrexx/geoplot | 8231baab0e286f1dec870dd5e8c6c8218e5b5da7 | ["MIT"] | null | null | null | geoplot/crs.py | redfrexx/geoplot | 8231baab0e286f1dec870dd5e8c6c8218e5b5da7 | ["MIT"] | null | null | null |
"""
This module defines the ``geoplot`` coordinate reference system classes, wrappers on
``cartopy.crs`` objects meant to be used as parameters to the ``projection`` parameter of all
front-end ``geoplot`` outputs. For the list of Cartopy CRS objects this module derives from,
refer to http://scitools.org.uk/cartopy/docs/latest/crs/projections.html.
"""
import cartopy.crs as ccrs
import geopandas as gpd
PlateCarree,\
LambertCylindrical,\
Mercator,\
Miller,\
Mollweide,\
Robinson,\
Sinusoidal,\
InterruptedGoodeHomolosine,\
Geostationary,\
NorthPolarStereo,\
SouthPolarStereo = tuple(
type(name, (LongitudeCentering,), {})
for name in ('PlateCarree',
'LambertCylindrical',
'Mercator',
'Miller',
'Mollweide',
'Robinson',
'Sinusoidal',
'InterruptedGoodeHomolosine',
'Geostationary',
'NorthPolarStereo',
'SouthPolarStereo')
)
Gnomonic = type('Gnomonic', (LatitudeCentering,), {})
AlbersEqualArea,\
AzimuthalEquidistant,\
LambertConformal,\
Orthographic,\
Stereographic,\
TransverseMercator,\
LambertAzimuthalEqualArea,\
UTM,\
OSGB,\
EuroPP,\
OSNI = tuple(
type(name, (Base,), {})
for name in ('AlbersEqualArea',
'AzimuthalEquidistant',
'LambertConformal',
'Orthographic',
'Stereographic',
'TransverseMercator',
'LambertAzimuthalEqualArea',
'UTM',
'OSGB',
'EuroPP',
'OSNI')
)
| 39.523256 | 98 | 0.624595 |
0af3c3569db12057875193547cf2329c8c03ae92 | 581 | py | Python | api/views/stores/att_handler.py | cderwin/maps | 0146260935a749679396022b6d2b1d90b6df2539 | ["MIT"] | null | null | null | api/views/stores/att_handler.py | cderwin/maps | 0146260935a749679396022b6d2b1d90b6df2539 | ["MIT"] | 7 | 2016-02-09T07:18:48.000Z | 2016-02-09T07:25:40.000Z | api/views/stores/att_handler.py | cderwin/maps | 0146260935a749679396022b6d2b1d90b6df2539 | ["MIT"] | null | null | null |
from .default_handler import StoresHandler
| 36.3125 | 120 | 0.636833 |
0af3eac5180ad01027c97600a407eb3106203f56 | 349 | py | Python | pythonProject/MUNDO 2/Desafio 54.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | ["MIT"] | null | null | null | pythonProject/MUNDO 2/Desafio 54.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | ["MIT"] | 1 | 2021-06-25T15:29:11.000Z | 2021-06-25T15:29:11.000Z | pythonProject/MUNDO 2/Desafio 54.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | ["MIT"] | null | null | null |
# Read the birth year of 7 people and show how many have already reached adulthood and how many have not
for c in range(1, 8):
    p = int(input('What year were you born? '))
    a = 2021 - p
    if a >= 18:
        print('Person number {} is already an adult.'.format(c))
    else:
        print('Person number {} is not an adult yet!'.format(c))
| 29.083333 | 101 | 0.638968 |
0af473baeece942d5629ff430bbc40a3d23df7c3 | 559 | py | Python | tmoga/utils/SDE.py | zjg540066169/tmoga | a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61 | ["Apache-2.0"] | 2 | 2021-10-06T04:45:52.000Z | 2022-03-20T01:18:05.000Z | tmoga/utils/SDE.py | zjg540066169/tmoga | a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61 | ["Apache-2.0"] | 1 | 2022-03-20T01:45:09.000Z | 2022-03-21T15:17:21.000Z | tmoga/utils/SDE.py | zjg540066169/tmoga | a3c3ecd0d72fc7c57fd5e5a624780e7ebf199c61 | ["Apache-2.0"] | 3 | 2021-10-09T08:08:44.000Z | 2022-03-20T01:18:07.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Provide function to calculate SDE distance
@auth: Jungang Zou
@date: 2021/05/05
"""
| 25.409091 | 95 | 0.554562 |
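Only the shebang and docstring of `SDE.py` survive. Below is one common formulation of the shift-based density estimation (SDE) distance from the multi-objective optimization literature, assuming minimization; whether tmoga uses exactly this variant cannot be confirmed from the extract:

```python
import numpy as np

def sde_distance(objectives, i):
    """Distance of solution i to its nearest shifted neighbour.

    objectives: (n_solutions, n_objectives) array; minimization is assumed.
    """
    p = objectives[i]
    # Shift every other solution so it is no better than p in any objective.
    shifted = np.maximum(objectives, p)
    dist = np.sqrt(((shifted - p) ** 2).sum(axis=1))
    dist[i] = np.inf  # ignore self-distance
    return dist.min()
```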
0af494f1b9f9c81499c5786b2c2ea3e48e90635b | 1,361 | py | Python | a1.py | pscly/shua_shouji | 1c03056c8f5db4a3a1222b2d31fdf44c3ab07cf6 | ["MulanPSL-1.0"] | null | null | null | a1.py | pscly/shua_shouji | 1c03056c8f5db4a3a1222b2d31fdf44c3ab07cf6 | ["MulanPSL-1.0"] | null | null | null | a1.py | pscly/shua_shouji | 1c03056c8f5db4a3a1222b2d31fdf44c3ab07cf6 | ["MulanPSL-1.0"] | null | null | null |
# -*- encoding=utf8 -*-
__author__ = "pscly"
from airtest.core.api import *
from airtest.cli.parser import cli_setup
# from douyin import *
if not cli_setup():
auto_setup(__file__, logdir=True, devices=[
"android://127.0.0.1:5037/decc8da3?cap_method=MINICAP_STREAM&&ori_method=MINICAPORI&&touch_method=MINITOUCH",
])
# script content
print("start...")
print("!")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
wake()
start_app("com.ss.android.ugc.aweme.lite")
hua = 0
count = 0  # placeholder name: the original non-ASCII variable name was lost in extraction
while 1:
    hua += 1
    count += 1
    if hua == 10:
        touch(Template(r"tpl1607564875731.png", record_pos=(-0.404, -0.67), resolution=(1079, 2340)))
    sleep(5)
    swipe((484, 1711), (531, 709))
    print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
    print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
    print("-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=")
# generate html report
# from airtest.report.report import simple_report
# simple_report(__file__, logpath=True)
| 23.877193 | 121 | 0.409993 |
0af54c84e47849c156e92dd294fed072b3ed4861 | 1,183 | py | Python | tests/v3_validation/cattlevalidationtest/core/test_logs_api.py | bmdepesa/validation-tests | 23e7ab95ce76744483a0657f790b42a88a93436d | ["Apache-2.0"] | 7 | 2015-11-18T17:43:08.000Z | 2021-07-14T09:48:18.000Z | tests/v3_validation/cattlevalidationtest/core/test_logs_api.py | bmdepesa/validation-tests | 23e7ab95ce76744483a0657f790b42a88a93436d | ["Apache-2.0"] | 175 | 2015-07-09T18:41:24.000Z | 2021-06-10T21:23:27.000Z | tests/v3_validation/cattlevalidationtest/core/test_logs_api.py | bmdepesa/validation-tests | 23e7ab95ce76744483a0657f790b42a88a93436d | ["Apache-2.0"] | 25 | 2015-08-08T04:54:24.000Z | 2021-05-25T21:10:37.000Z |
from common_fixtures import * # NOQA
import websocket as ws
import pytest
| 28.853659 | 68 | 0.687236 |
0af5f234889bb24214fc2ee681419b82d7cdaceb | 13,717 | py | Python | models/psg_seed_resnet.py | VITA-Group/Peek-a-Boo | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | ["MIT"] | 2 | 2022-01-22T03:57:21.000Z | 2022-01-30T20:44:32.000Z | models/psg_seed_resnet.py | VITA-Group/Peek-a-Boo | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | ["MIT"] | null | null | null | models/psg_seed_resnet.py | VITA-Group/Peek-a-Boo | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | ["MIT"] | 2 | 2022-01-30T12:26:56.000Z | 2022-03-14T12:42:06.000Z |
'''ResNet using PSG in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
from numpy.lib.arraysetops import isin
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.masked_psg_seed_conv import PredictiveSeedConv2d
from masked_layers import layers
# Fixed
NUM_BITS = 32
NUM_BITS_WEIGHT = 32
NUM_BITS_GRAD = None
BIPRECISION = False
PREDICTIVE_FORWARD = False
WRITER = None
WRITER_PREFIX_COUNTER = 0
# Tunable
PREDICTIVE_BACKWARD = True
MSB_BITS = 4
MSB_BITS_WEIGHT = 4
MSB_BITS_GRAD = 8
THRESHOLD = 0.0
SPARSIFY = False
SIGN = True
def conv1x1(in_planes, out_planes, stride=1, input_signed=True, predictive_forward=True, writer_prefix=""):
"1x1 convolution with no padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
def conv3x3(in_planes, out_planes, stride=1, input_signed=False, predictive_forward=True, writer_prefix=""):
"3x3 convolution with padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
def PsgSeedResNet20(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,3,3], in_planes=16, num_classes=num_classes, init_method=init_method)
def PsgSeedResNet18(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet34(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet50(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet101(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,23,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet152(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,8,36,3], num_classes=num_classes, init_method=init_method)
def test():
    net = PsgSeedResNet18()
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())
# test()
| 37.787879 | 139 | 0.679813 |
0af634a53b2ebcc4683b0c1863c9043af5a4905d | 1,090 | py | Python | drybell/drybell_lfs_spark.py | jsnlp/snorkel-tutorials | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | ["Apache-2.0"] | 315 | 2019-07-27T22:49:20.000Z | 2022-03-30T10:02:02.000Z | drybell/drybell_lfs_spark.py | jsnlp/snorkel-tutorials | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | ["Apache-2.0"] | 133 | 2019-07-25T02:07:37.000Z | 2022-03-29T12:08:32.000Z | drybell/drybell_lfs_spark.py | jsnlp/snorkel-tutorials | b4cda9f918daf77f4011ec1598c08d9bd7e51c39 | ["Apache-2.0"] | 173 | 2019-08-13T02:27:11.000Z | 2022-03-30T05:26:40.000Z |
from pyspark.sql import Row
from snorkel.labeling.lf import labeling_function
from snorkel.labeling.lf.nlp_spark import spark_nlp_labeling_function
from snorkel.preprocess import preprocessor
from drybell_lfs import load_celebrity_knowledge_base
ABSTAIN = -1
NEGATIVE = 0
POSITIVE = 1
| 26.585366 | 83 | 0.748624 |
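Everything below these imports and label constants was dropped from the row. An illustrative Spark labeling function in the style the imports suggest; the `content` attribute and the shape returned by `load_celebrity_knowledge_base` are assumptions, not the tutorial's real code:

```python
@labeling_function(resources=dict(celebrity_kb=load_celebrity_knowledge_base()))
def article_mentions_celebrity(x: Row, celebrity_kb) -> int:
    # POSITIVE if any known celebrity name appears in the article body.
    if any(name in x.content for name in celebrity_kb):
        return POSITIVE
    return ABSTAIN
```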
0af65b8666e4023ddc4b24aa0b03dd9c64d6dd98 | 15,465 | py | Python | dreamplace/ops/dct/discrete_spectral_transform.py | dongleecsu/DREAMPlace | 86b56521a3eacfb5cadff935631302bf6986a689 | ["BSD-3-Clause"] | 12 | 2022-03-01T06:46:42.000Z | 2022-03-27T03:40:45.000Z | dreamplace/ops/dct/discrete_spectral_transform.py | dongleecsu/DREAMPlace | 86b56521a3eacfb5cadff935631302bf6986a689 | ["BSD-3-Clause"] | 4 | 2022-03-08T13:00:01.000Z | 2022-03-30T10:07:01.000Z | dreamplace/ops/dct/discrete_spectral_transform.py | dongleecsu/DREAMPlace | 86b56521a3eacfb5cadff935631302bf6986a689 | ["BSD-3-Clause"] | 8 | 2022-03-01T06:46:45.000Z | 2022-03-29T12:40:05.000Z |
##
# @file discrete_spectral_transform.py
# @author Yibo Lin
# @date Jun 2018
#
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
import pdb
""" Discrete spectral transformation leveraging fast fourier transform engine.
The math here mainly uses Prosthaphaeresis properties.
The trigonometric identities exploited by prosthaphaeresis relate products of trigonometric functions to sums.
sin(a) sin(b) = 1/2 * (cos(a-b) - cos(a+b))
cos(a) cos(b) = 1/2 * (cos(a-b) + cos(a+b))
sin(a) cos(b) = 1/2 * (sin(a+b) + sin(a-b))
cos(a) sin(b) = 1/2 * (sin(a-b) - sin(a+b))
A 2D FFT performs
y_{u, v} = \sum_i \sum_j x_{i, j} exp(-j*2*pi*u*i/M) exp(-j*2*pi*v*j/N)
= \sum_i \sum_j x_{i, j} exp(-j*2*pi*(u*i/M + v*j/N))
= \sum_i \sum_j x_{i, j} (cos(-2*pi*(u*i/M + v*j/N)) + j sin(-2*pi*(u*i/M + v*j/N))).
By mapping the original image from (i, j) to (i, N-j), we can have (u*i/M - v*j/N) inside exp.
This will enable us to derive various cos/sin transformation by computing FFT twice.
"""
def get_expk(N, dtype, device):
""" Compute 2*exp(-1j*pi*u/(2N)), but not exactly the same.
The actual return is 2*cos(pi*u/(2N)), 2*sin(pi*u/(2N)).
This will make later multiplication easier.
"""
pik_by_2N = torch.arange(N, dtype=dtype, device=device)
pik_by_2N.mul_(np.pi/(2*N))
# cos, sin
# I use sin because the real part requires subtraction
# this will be easier for multiplication
expk = torch.stack([pik_by_2N.cos(), pik_by_2N.sin()], dim=-1)
expk.mul_(2)
return expk.contiguous()
def get_expkp1(N, dtype, device):
""" Compute 2*exp(-1j*pi*(u+1)/(2N)), but not exactly the same.
The actual return is 2*cos(pi*(u+1)/(2N)), 2*sin(pi*(u+1)/(2N))
"""
neg_pik_by_2N = torch.arange(1, N+1, dtype=dtype, device=device)
neg_pik_by_2N.mul_(np.pi/(2*N))
# sin, -cos
# I swap -cos and sin because we need the imag part
# this will be easier for multiplication
expk = torch.stack([neg_pik_by_2N.cos(), neg_pik_by_2N.sin()], dim=-1)
expk.mul_(2)
return expk.contiguous()
def get_perm(N, dtype, device):
""" Compute permutation to generate following array
0, 2, 4, ..., 2*(N//2)-2, 2*(N//2)-1, 2*(N//2)-3, ..., 3, 1
"""
perm = torch.zeros(N, dtype=dtype, device=device)
perm[0:(N-1)//2+1] = torch.arange(0, N, 2, dtype=dtype, device=device)
perm[(N-1)//2+1:] = torch.arange(2*(N//2)-1, 0, -2, dtype=dtype, device=device)
return perm
def dct_2N(x, expk=None):
""" Batch Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2i+1)*u/(2N)),
Impelements the 2N padding trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. Pad x by zeros
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
# pad last dimension
x_pad = F.pad(x, (0, N), 'constant', 0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.rfft(x_pad, signal_ndim=1, normalized=False, onesided=True)[..., 0:N, :]
y.mul_(1.0/N)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# get real part
y.mul_(expk)
# I found add is much faster than sum
#y = y.sum(dim=-1)
return y[..., 0]+y[..., 1]
def dct_N(x, perm=None, expk=None):
""" Batch Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2i+1)*u/(2N)),
Impelements the N permuting trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. permute x such that [a, b, c, d, e, f] becomes [a, c, e, f, d, b]
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if perm is None:
perm = get_perm(N, dtype=torch.int64, device=x.device)
if x.ndimension() <= 1:
x_reorder = x.view([1, N])
else:
x_reorder = x.clone()
# switch from row-major to column-major for speedup
x_reorder.transpose_(dim0=-2, dim1=-1)
#x_reorder = x_reorder[..., perm, :]
x_reorder = x_reorder.index_select(dim=-2, index=perm)
# switch back
x_reorder.transpose_(dim0=-2, dim1=-1)
y = torch.rfft(x_reorder, signal_ndim=1, normalized=False, onesided=False)[..., 0:N, :]
y.mul_(1.0/N)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# get real part
y.mul_(expk)
# I found add is much faster than sum
#y = y.sum(dim=-1)
return y[..., 0]+y[..., 1]
def idct_2N(x, expk=None):
""" Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
Impelements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expk)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.irfft(x_pad, signal_ndim=1, normalized=False, onesided=False, signal_sizes=[2*N])[..., 0:N]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def dst(x, expkp1=None):
""" Batch Discrete Sine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i sin(pi*(2i+1)*(u+1)/(2N)),
Impelements the 2N padding trick to solve DCT with FFT in the following link,
https://dsp.stackexchange.com/questions/2807/fast-cosine-transform-via-fft
1. Pad x by zeros
2. Perform FFT
3. Multiply by 2*exp(-1j*pi*u/(2N))
4. Extract the real part
"""
# last dimension
N = x.size(-1)
# pad last dimension
x_pad = F.pad(x, (0, N), 'constant', 0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.rfft(x_pad, signal_ndim=1, normalized=False, onesided=True)[..., 1:N+1, :]
if expkp1 is None:
expkp1 = get_expkp1(N, dtype=x.dtype, device=x.device)
# get imag part
y = y[..., 1].mul(expkp1[:, 0]) - y[..., 0].mul(expkp1[:, 1])
return y
def idst(x, expkp1=None):
""" Batch Inverse Discrete Sine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
Impelements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
"""
# last dimension
N = x.size(-1)
if expkp1 is None:
expkp1 = get_expkp1(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expkp1)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
y = torch.irfft(x_pad, signal_ndim=1, normalized=False, onesided=False, signal_sizes=[2*N])[..., 1:N+1]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def idxt(x, cos_or_sin_flag, expk=None):
""" Batch Inverse Discrete Cosine Transformation without normalization to coefficients.
Compute y_u = \sum_i x_i cos(pi*(2u+1)*i/(2N)),
Impelements the 2N padding trick to solve IDCT with IFFT in the following link,
https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/spectral_ops.py
1. Multiply by 2*exp(1j*pi*u/(2N))
2. Pad x by zeros
3. Perform IFFT
4. Extract the real part
@param x batch 1D tensor for conversion
@param cos_or_sin_flag 0 for cosine tranformation and 1 or sine transformation
@param expk 2*exp(j*pi*k/(2N))
"""
# last dimension
N = x.size(-1)
if expk is None:
expk = get_expk(N, dtype=x.dtype, device=x.device)
# multiply by 2*exp(1j*pi*u/(2N))
x_pad = x.unsqueeze(-1).mul(expk)
# pad second last dimension, excluding the complex number dimension
x_pad = F.pad(x_pad, (0, 0, 0, N), 'constant', 0)
if len(x.size()) == 1:
x_pad.unsqueeze_(0)
# the last dimension here becomes -2 because complex numbers introduce a new dimension
# Must use IFFT here
y = torch.ifft(x_pad, signal_ndim=1, normalized=False)[..., 0:N, cos_or_sin_flag]
y.mul_(N)
if len(x.size()) == 1:
y.squeeze_(0)
return y
def dct2_2N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return dct_2N(dct_2N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def dct2_N(x, perm0=None, expk0=None, perm1=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param perm0 with length M
@param expk0 with length M
@param perm1 with length N
@param expk1 with length N
"""
return dct_N(dct_N(x.transpose(dim0=-2, dim1=-1), perm=perm0, expk=expk0).transpose_(dim0=-2, dim1=-1), perm=perm1, expk=expk1)
def idct2_2N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return idct_2N(idct_2N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def idct2_N(x, expk0=None, expk1=None):
""" Batch 2D Discrete Cosine Transformation without normalization to coefficients.
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk0 with length M
@param expk1 with length N
"""
return idct_N(idct_N(x.transpose(dim0=-2, dim1=-1), expk0).transpose_(dim0=-2, dim1=-1), expk1)
def dst2(x, expkp1_0=None, expkp1_1=None):
""" Batch 2D Discrete Sine Transformation without normalization to coefficients.
Compute 1D DST twice.
@param x batch tensor, the 2D part is MxN
@param expkp1_0 with length M
@param expkp1_1 with length N
"""
return dst(dst(x.transpose(dim0=-2, dim1=-1), expkp1_0).transpose_(dim0=-2, dim1=-1), expkp1_1)
def idcct2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Cosine-Cosine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} cos(pi/M*p*(u+0.5)) cos(pi/N*q*(v+0.5))
Compute 1D DCT twice.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 0, expk_1).transpose_(dim0=-2, dim1=-1), 0, expk_0).transpose(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1), 0, expk_1)
def idsct2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Sine-Cosine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} sin(pi/M*p*(u+0.5)) cos(pi/N*q*(v+0.5))
Compute 1D DST and then 1D DCT.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 0, expk_1).transpose_(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1), 0, expk_1)
def idcst2(x, expk_0=None, expk_1=None):
""" Batch 2D Inverse Discrete Cosine-Sine Transformation without normalization to coefficients.
It computes following equation, which is slightly different from standard DCT formulation.
y_{u, v} = \sum_p \sum_q x_{p, q} cos(pi/M*p*(u+0.5)) sin(pi/N*q*(v+0.5))
Compute 1D DCT and then 1D DST.
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
"""
return idxt(idxt(x, 1, expk_1).transpose_(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1)
# return idxt(idxt(x.transpose(dim0=-2, dim1=-1), 0, expk_0).transpose_(dim0=-2, dim1=-1), 1, expk_1)
def idxst_idct(x, expk_0=None, expk_1=None):
'''
Batch 2D Inverse Discrete Sine-Cosine Transformation without normalization to coefficients.
Compute idxst(idct(x))
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
'''
return idxt(idct_N(x, expk_1).transpose_(dim0=-2, dim1=-1), 1, expk_0).transpose_(dim0=-2, dim1=-1)
def idct_idxst(x, expk_0=None, expk_1=None):
'''
Batch 2D Inverse Discrete Cosine-Sine Transformation without normalization to coefficients.
Compute idct(idxst(x)).
@param x batch tensor, the 2D part is MxN
@param expk_0 with length M, 2*exp(-1j*pi*k/(2M))
@param expk_1 with length N, 2*exp(-1j*pi*k/(2N))
'''
return idct_N(idxt(x, 1, expk_1).transpose_(dim0=-2, dim1=-1), expk_0).transpose_(dim0=-2, dim1=-1)
| 35.798611 | 131 | 0.643453 |
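The module above implements the same DCT-II two ways: `dct_2N` (2N zero-padding trick) and `dct_N` (N-point permutation trick), so the two should agree to floating-point tolerance. A quick consistency check, assuming a pre-1.8 PyTorch where `torch.rfft` still exists:

```python
x = torch.rand(4, 8, dtype=torch.float64)
y_pad = dct_2N(x)
y_perm = dct_N(x)
print("dct_2N and dct_N agree:", torch.allclose(y_pad, y_perm, atol=1e-10))
```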
0af766c917854c90cf7eae087d9105162f3eb248 | 8,667 | py | Python | py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | ["Apache-2.0"] | null | null | null | py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | ["Apache-2.0"] | null | null | null | py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py | vkuznet/h2o | e08f7014f228cbaecfb21f57379970e6a3ac0756 | ["Apache-2.0"] | null | null | null |
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e, h2o_glm
import h2o_util
zeroList = [
'Result0 = 0',
]
# the first column should use this
exprList = [
'Result<n> = sum(<keyX>[<col1>])',
]
DO_SUMMARY = False
DO_COMPARE_SUM = False
if __name__ == '__main__':
h2o.unit_main()
| 45.376963 | 151 | 0.572055 |
0af886d3e8e59b20a8f0a8f86ad88dbe765599d2 | 14,441 | py | Python | python/influx/database_tables.py | SA-22C-smoothswing/spectrum-protect-sppmon | 8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6 | ["Apache-2.0"] | null | null | null | python/influx/database_tables.py | SA-22C-smoothswing/spectrum-protect-sppmon | 8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6 | ["Apache-2.0"] | null | null | null | python/influx/database_tables.py | SA-22C-smoothswing/spectrum-protect-sppmon | 8a9c70f65d9faf6ffc35f3400383dcaa6e0fcbc6 | ["Apache-2.0"] | null | null | null |
"""Provides all database and table structures used for the influx database.
Classes:
Datatype
Database
Table
RetentionPolicy
"""
from __future__ import annotations
from enum import Enum, unique
import re
import json
from typing import Any, Dict, List, Set, Tuple, Union
import influx.influx_queries as Queries
from utils.execption_utils import ExceptionUtils
from utils.influx_utils import InfluxUtils
from utils.spp_utils import SppUtils


class RetentionPolicy:
    # Class header reconstructed for the orphaned methods below; the original
    # constructor and attribute declarations are not present in this extract.

    def to_dict(self) -> Dict[str, Union[str, int, bool]]:
        """Used to create a dict out of the values, able to compare to influxdb-created dict"""
        return {
            'name': self.name,
            'duration': self.duration,
            'shardGroupDuration': self.__shard_duration,
            'replicaN': self.__replication,
            'default': self.default
        }

    def __str__(self) -> str:
        return f"{self.database.name}.{self.name}"

    def __repr__(self) -> str:
        return f"Retention Policy: {self.name}"

    def __eq__(self, o: object) -> bool:
        if(isinstance(o, RetentionPolicy)):
            return o.to_dict() == self.to_dict()
        return False

    def __hash__(self) -> int:
        return hash(json.dumps(self.to_dict(), sort_keys=True))
class Table:
    """Represents a measurement in influx. Contains pre-defined tag and field definitions.

    Attributes
        name - name of table
        fields - dict of field name with datatype
        tags - tags as list of str
        time_key - key name of the timestamp field
        retention_policy - retention policy associated with this table
        database - table is declared within this database

    Methods
        split_by_table_def - Split the given dict into a pre-defined set of tags, fields and a timestamp.
    """

    __bad_measurement_characters: List[str] = [' ', ',']
    """those chars need to be escaped within a measurement/table name"""

    def split_by_table_def(self, mydict: Dict[str, Any]) -> Tuple[
            Dict[str, Any], Dict[str, Any], Union[str, int, None]]:
        """Split the given dict into a pre-defined set of tags, fields and a timestamp.

        None-Values and empty strings are ignored.
        If there are no fields declared, it will split by a default pattern.
        Undeclared columns will produce a warning.
        This function uses the tag/field and timestamp definition declared within this table.

        Arguments:
            self {Table} -- Table with predefined set of tags and fields
            mydict {Dict[str, Any]} -- dict with columns as keys. None-Values are ignored

        Raises:
            ValueError: If no dict is given or not of type dict.

        Returns:
            (Dict[str, Any], Dict[str, Any], int) -- Tuple of: tags, fields, timestamp
        """
        if(not mydict):
            raise ValueError("need at least one value in dict to split")

        # if table is not defined use default split
        if(not self.fields):
            return InfluxUtils.default_split(mydict=mydict)

        # fill dicts
        # table.fields is a dict, we only need the keys
        fields: Dict[str, Any] = dict.fromkeys(self.fields.keys(), None)
        tags: Dict[str, Any] = dict.fromkeys(self.tags, None)

        # what field should be recorded as time
        time_stamp_field = self.time_key
        # helper variable to only overwrite if it is not the time_stamp_field
        time_overwrite_allowed = True
        # actual timestamp saved
        time_stamp: Union[str, int, None] = None

        for (key, value) in mydict.items():
            # Ignore empty entries
            if(value is None or (isinstance(value, str) and not value)):
                continue

            # Check timestamp value if it matches any of predefined time names
            if(key in time_stamp_field or key in InfluxUtils.time_key_names):
                # sppmonCTS has lowest priority, only set if otherwise None
                if(time_stamp is None and key == SppUtils.capture_time_key):
                    time_stamp = value
                # time_stamp_field is highest priority. Do not overwrite it.
                elif(key is time_stamp_field):
                    time_overwrite_allowed: bool = False
                    time_stamp = value
                # if time_stamp_field is not used yet, overwrite sppmonCaptureTime or others
                elif(time_overwrite_allowed):
                    time_stamp = value
                # if no overwrite allowed, continue and drop field
                else:
                    continue

            # Otherwise check for Keys or Fields
            if(key in fields):
                fields[key] = value
            elif(key in tags):
                tags[key] = value
            elif(key in InfluxUtils.time_key_names or key in time_stamp_field):
                continue
            else:
                ExceptionUtils.error_message(f"Not all columns for table {self.name} are declared: {key}")
                # before key+"MISSING" : Removed to avoid death-circle on repeated queries.
                fields[key] = value

        return (tags, fields, time_stamp)
class Database:
    """
    Represents a instance of influx database. Define all table definitions within the init method.

    Attributes
        name - name of the database
        tables - tables with predefined tags & fields
        retention_policies - Set of all provided Retention Policies
        continuous_queries - Set of all provided Continuous Queries

    Methods
        __getitem__ - [] access on the tables via name. Creates empty table if missing.
    """

    def __getitem__(self, table_name: str) -> Table:
        """Acquire an instance of a predefined table; returns an empty table if it was not defined. []-Access.

        Arguments:
            table_name {str} -- name of the table you want to acquire

        Returns:
            Table -- Instance of a predefined table, otherwise new empty table
        """
        return self.tables.get(table_name, Table(self, table_name))

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return f'Database: {self.name}'

    def __init__(self, name: str):
        self.__name: str = name
        self.__tables: Dict[str, Table] = {}
        self.__retention_policies: Set[RetentionPolicy] = set()
        self.__continuous_queries: Set[Queries.ContinuousQuery] = set()
| 36.012469 | 119 | 0.628696 |
0af8af43646ac075b324487dffc3942d97354220 | 1,145 | py | Python | examples/rpc_server_side.py | calendar42/SleekXMPP--XEP-0080- | d7bd5fd29f26a5d7de872a49ff63a353b8043e49 | ["BSD-3-Clause"] | 1 | 2016-10-24T05:30:25.000Z | 2016-10-24T05:30:25.000Z | examples/rpc_server_side.py | vijayp/SleekXMPP | b2e7f57334d27f140f079213c2016615b7168742 | ["BSD-3-Clause"] | null | null | null | examples/rpc_server_side.py | vijayp/SleekXMPP | b2e7f57334d27f140f079213c2016615b7168742 | ["BSD-3-Clause"] | null | null | null |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Dann Martens
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.xep_0009.remote import Endpoint, remote, Remote, \
    ANY_ALL
import threading
if __name__ == '__main__':
main()
| 22.019231 | 73 | 0.627074 |
0af8ec7bff7f3176683d674120e0f5944b63d168 | 11,868 | py | Python | lib/TelloAPI.py | wuhuikai/DeepDrone | f4700178a7568fa9e308f34d0223e28635eb7660 | ["MIT"] | 1 | 2019-08-19T00:12:56.000Z | 2019-08-19T00:12:56.000Z | lib/TelloAPI.py | wuhuikai/DeepDrone | f4700178a7568fa9e308f34d0223e28635eb7660 | ["MIT"] | null | null | null | lib/TelloAPI.py | wuhuikai/DeepDrone | f4700178a7568fa9e308f34d0223e28635eb7660 | ["MIT"] | null | null | null |
import cv2
import time
import socket
import threading
| 28.528846 | 153 | 0.565133 |
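Only the imports of this Tello API wrapper survived. For context, the Tello SDK is driven by plain-text commands over UDP; a minimal sketch (the standard SDK address and port are used, but treat the details as assumptions rather than this library's actual API):

```python
import socket

def send_command(command, address=("192.168.10.1", 8889), timeout=5.0):
    """Send one Tello SDK text command and return the drone's reply."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    try:
        sock.sendto(command.encode("utf-8"), address)
        reply, _ = sock.recvfrom(1024)
        return reply.decode("utf-8")
    finally:
        sock.close()

# Usage: send_command("command") enters SDK mode; send_command("battery?") queries charge.
```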
0af8f9b563483812450b36d24892bee1c8265e62 | 388 | py | Python | terrascript/resource/sematext.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | ["BSD-2-Clause"] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | terrascript/resource/sematext.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | ["BSD-2-Clause"] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | terrascript/resource/sematext.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | ["BSD-2-Clause"] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z |
# terrascript/resource/sematext.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:36 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.resource.sematext
#
# instead of
#
# >>> import terrascript.resource.sematext.sematext
#
# This is only available for 'official' and 'partner' providers.
from terrascript.resource.sematext.sematext import *
| 25.866667 | 73 | 0.75 |
0af95702c3886ad24fef9b7d2bef0b353d7f0d8a | 5,779 | py | Python | eval_encoder.py | lithium0003/Image2UTF8-Transformer | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | ["MIT"] | null | null | null | eval_encoder.py | lithium0003/Image2UTF8-Transformer | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | ["MIT"] | null | null | null | eval_encoder.py | lithium0003/Image2UTF8-Transformer | 2620af2a8bdaf332e25b39ce05d610e21e6492fc | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
import numpy as np
import os, time, csv
import tqdm
import umap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import signal
import net
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meirio', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP']
import net
if __name__ == '__main__':
eval()
| 37.525974 | 167 | 0.554767 |
0afa87a4b421519306afb64f3b1e1263669a468c | 22,351 | py | Python | clipper_admin/clipper_admin/clipper_admin.py | SimonZsx/clipper | 457088be2ebe68c68b94d90389d1308e35b4c844 | ["Apache-2.0"] | 2 | 2019-04-24T13:46:28.000Z | 2019-05-28T06:59:26.000Z | clipper_admin/clipper_admin/clipper_admin.py | SimonZsx/clipper | 457088be2ebe68c68b94d90389d1308e35b4c844 | ["Apache-2.0"] | null | null | null | clipper_admin/clipper_admin/clipper_admin.py | SimonZsx/clipper | 457088be2ebe68c68b94d90389d1308e35b4c844 | ["Apache-2.0"] | 4 | 2019-04-03T11:03:57.000Z | 2019-06-26T08:22:38.000Z |
from __future__ import absolute_import, division, print_function
import logging
import docker
import tempfile
import requests
from requests.exceptions import RequestException
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
from google.protobuf.json_format import MessageToDict
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
import grpc
from .rpc import model_pb2_grpc
from .rpc import model_pb2
from .rpc import prediction_pb2_grpc
from .rpc import prediction_pb2
from .rpc import management_pb2
from .rpc import management_pb2_grpc
from .container_manager import CONTAINERLESS_MODEL_IMAGE, ClusterAdapter
from .exceptions import ClipperException, UnconnectedException
from .version import __version__, __registry__
from . import graph_parser
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper" # Used Internally for Test; Not Windows Compatible
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
# logging.basicConfig(
# format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
# datefmt='%y-%m-%d:%H:%M:%S',
# level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = r"[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"  # raw string so the \Z anchor reaches the regex engine intact
deployment_regex = re.compile(deploy_regex_str)
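# Illustrative checks (added annotation): the pattern above enforces
# DNS-1123-style names -- lowercase alphanumerics and dashes, starting and
# ending with an alphanumeric character.
#   deployment_regex.match("my-model-1")  -> match
#   deployment_regex.match("My_Model")    -> None (uppercase and underscore)
#   deployment_regex.match("-bad-name")   -> None (leading dash)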
| 39.629433 | 176 | 0.579437 |
0afb2dc8c2daf11d9a82ca819aeffdafacc6c971 | 2515 | py | Python | graph.py | VaniSHadow/tpGenerator | 2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0 | ["Unlicense"] | null | null | null | graph.py | VaniSHadow/tpGenerator | 2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0 | ["Unlicense"] | null | null | null | graph.py | VaniSHadow/tpGenerator | 2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0 | ["Unlicense"] | null | null | null |
import random
import numpy
import copy
| 27.043011 | 113 | 0.622664 |
0afbde7fb6ef3a1d965ab24316c2720252ada994 | 970 | py | Python | csv2googlesheets/to_google_sheets.py | AlexSkrn/csv2googlesheets | 71656dcc6827b1c58ffe80bc55aa6f1ee816f216 | ["MIT"] | null | null | null | csv2googlesheets/to_google_sheets.py | AlexSkrn/csv2googlesheets | 71656dcc6827b1c58ffe80bc55aa6f1ee816f216 | ["MIT"] | null | null | null | csv2googlesheets/to_google_sheets.py | AlexSkrn/csv2googlesheets | 71656dcc6827b1c58ffe80bc55aa6f1ee816f216 | ["MIT"] | null | null | null |
"""This module provides a console interface to convert CSV to Google Sheets."""
from csv2googlesheets.gapi_authorization import auth_with_google
from csv2googlesheets.gapi_create_sheet import create_sheet
from csv2googlesheets.gapi_write_to_sheet import write_to_sheet
from csv2googlesheets.parse_file import build_spreadsheet_title
from csv2googlesheets.parse_file import parse_file
from csv2googlesheets.parse_cli_args import parse_cli_args
def main():
"""Control the flow of operations to write data from csv to G Sheets."""
cli_args = parse_cli_args()
values = parse_file(path=cli_args.csv)
spreadsheet_title = build_spreadsheet_title(cli_args.csv)
google_service = auth_with_google(path_creds=cli_args.credentials_json)
spreadsheet_id = create_sheet(google_service, spreadsheet_title)
write_to_sheet(
google_service,
sheet_id=spreadsheet_id,
values=values,
)
if __name__ == '__main__':
main()
| 32.333333 | 79 | 0.786598 |
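The main() above is a straight four-step pipeline: parse the CSV, authorize
against the Google API, create a spreadsheet, then write the values. A hedged
programmatic sketch of the same flow, reusing only the helpers imported in
that module (the file names here are made up for illustration):

service = auth_with_google(path_creds='credentials.json')
sheet_id = create_sheet(service, build_spreadsheet_title('report.csv'))
write_to_sheet(service, sheet_id=sheet_id, values=parse_file(path='report.csv'))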
0afbe95b203d0f0c8eb9b8de7581ea6aec0cc6c2 | 450 | py | Python | netforce_account/netforce_account/migrations/credit_remain_cur.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | ["MIT"] | 27 | 2015-09-30T23:53:30.000Z | 2021-06-07T04:56:25.000Z | netforce_account/netforce_account/migrations/credit_remain_cur.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | ["MIT"] | 191 | 2015-10-08T11:46:30.000Z | 2019-11-14T02:24:36.000Z | netforce_account/netforce_account/migrations/credit_remain_cur.py | nfco/netforce | 35252eecd0a6633ab9d82162e9e3ff57d4da029a | ["MIT"] | 32 | 2015-10-01T03:59:43.000Z | 2022-01-13T07:31:05.000Z |
from netforce.model import get_model
from netforce import migration
from netforce import database
Migration.register()
| 32.142857 | 170 | 0.791111 |
0afc21eecdc60b266d8862b6f28eebf607699a5d | 48451 | py | Python | chevah/compat/testing/testcase.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | ["BSD-3-Clause"] | 5 | 2016-12-03T22:54:50.000Z | 2021-11-17T11:17:39.000Z | chevah/compat/testing/testcase.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | ["BSD-3-Clause"] | 76 | 2015-01-22T16:00:31.000Z | 2022-02-09T22:13:34.000Z | chevah/compat/testing/testcase.py | chevah/compat | d22e5f551a628f8a1652c9f2eea306e17930cb8f | ["BSD-3-Clause"] | 1 | 2016-12-10T15:57:31.000Z | 2016-12-10T15:57:31.000Z |
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Adi Roiban.
# See LICENSE for details.
"""
TestCase used for Chevah project.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from six import text_type
from six.moves import range
import contextlib
import inspect
import threading
import os
import platform
import socket
import sys
import time
from bunch import Bunch
from mock import patch, Mock
from nose import SkipTest
try:
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import (
_SocketWaker, _UnixWaker, _SIGCHLDWaker
)
from twisted.python.failure import Failure
except ImportError:
# Twisted support is optional.
_SocketWaker = None
_UnixWaker = None
_SIGCHLDWaker = None
from chevah.compat import (
DefaultAvatar,
LocalFilesystem,
process_capabilities,
system_users,
SuperAvatar,
)
from chevah.compat.administration import os_administration
from chevah.compat.testing.assertion import AssertionMixin
from chevah.compat.testing.mockup import mk
from chevah.compat.testing.constant import (
TEST_NAME_MARKER,
)
from chevah.compat.testing.filesystem import LocalTestFilesystem
# For Python below 2.7 we use the separate unittest2 module.
# It comes by default in Python 2.7.
if sys.version_info[0:2] < (2, 7):
from unittest2 import TestCase
# Shut up you linter.
TestCase
else:
from unittest import TestCase
try:
# Import reactor last in case some other modules are changing the reactor.
from twisted.internet import reactor
except ImportError:
reactor = None
def _get_hostname():
"""
Return hostname as resolved by default DNS resolver.
"""
return socket.gethostname()
def _get_os_version():
"""
On non-Linux this is just the os_name.
On Linux is the distribution name and the version.
On Windows it is the `nt` followed by the major and minor NT version.
It is not the marketing name.
We only support the Windows NT family.
See: https://en.wikipedia.org/wiki/Windows_NT#Releases
On OSX it returns `osx` followed by the version.
It is not the version of the underlying Darwin OS.
See: https://en.wikipedia.org/wiki/MacOS#Release_history
"""
if os.name == 'nt':
parts = platform.version().split('.')
return 'nt-%s.%s' % (parts[0], parts[1])
# We are now in Unix zone.
os_name = os.uname()[0].lower()
if os_name == 'darwin':
parts = platform.mac_ver()[0].split('.')
return 'osx-%s.%s' % (parts[0], parts[1])
if os_name == 'sunos':
parts = platform.release().split('.')
return 'solaris-%s' % (parts[1],)
if os_name == 'aix': # noqa:cover
return 'aix-%s.%s' % (platform.version(), platform.release())
if os_name != 'linux':
return process_capabilities.os_name
# We delay the import as it will call lsb_release.
import ld
distro_name = ld.id()
if distro_name == 'arch':
# Arch has no version.
return 'arch'
if distro_name in ['centos', 'ol']:
# Normalize all RHEL variants.
distro_name = 'rhel'
distro_version = ld.version().split('.', 1)[0]
return '%s-%s' % (distro_name, distro_version)
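# Worked examples of the normalization above (added for illustration):
#   Windows NT 10.0.x build      -> 'nt-10.0'
#   macOS 10.13.x                -> 'osx-10.13'
#   CentOS 7 / Oracle Linux 7    -> 'rhel-7' (RHEL variants are merged)
#   Arch Linux                   -> 'arch' (rolling release, no version)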
def _get_cpu_type():
"""
Return the CPU type as used in the brink.sh script.
"""
base = platform.processor()
if base == 'aarch64':
return 'arm64'
if base == 'x86_64':
return 'x64'
return base
_CI_NAMES = Bunch(
LOCAL='local',
GITHUB='github-actions',
TRAVIS='travis',
BUILDBOT='buildbot',
UNKNOWN='unknown-ci',
AZURE='azure-pipelines',
)
def _get_ci_name():
"""
Return the name of the CI on which the tests are currently executed.
"""
if os.environ.get('BUILDBOT', '').lower() == 'true':
return _CI_NAMES.BUILDBOT
if os.environ.get('GITHUB_ACTIONS', '').lower() == 'true':
return _CI_NAMES.GITHUB
if os.environ.get('TRAVIS', '').lower() == 'true':
return _CI_NAMES.TRAVIS
if os.environ.get('INFRASTRUCTURE', '') == 'AZUREPIPELINES':
return _CI_NAMES.AZURE
if os.environ.get('CI', '').lower() == 'true':
return _CI_NAMES.UNKNOWN
return _CI_NAMES.LOCAL
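def _skip_unless_local_example():
    """
    Illustrative helper (an addition, not original code): tests can use the
    detected CI name to skip themselves on shared infrastructure.
    """
    if _get_ci_name() != _CI_NAMES.LOCAL:
        raise SkipTest('Example: this test only runs on developer machines.')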
# NOTE: assumed reconstruction -- the class statement below was missing from
# this extract while its methods survived, and the class is subclassed by
# FileSystemTestCase further down. The exact base classes are an assumption.
class ChevahTestCase(TestCase, AssertionMixin):

    def folderInTemp(self, *args, **kwargs):
"""
Create a folder in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.folderInTemp(*args, **kwargs)
def fileInTemp(self, *args, **kwargs):
"""
Create a file in the default temp folder and mark it for cleanup.
"""
kwargs['cleanup'] = self.addCleanup
return mk.fs.fileInTemp(*args, **kwargs)
def assertIn(self, target, source):
"""
Overwrite stdlib to swap the arguments.
"""
if source not in target:
message = u'%s not in %s.' % (repr(source), repr(target))
raise AssertionError(message.encode('utf-8'))
def assertIsInstance(self, expected_type, value, msg=None):
"""
Raise an exception if `value` is not an instance of `expected_type`
"""
# In Python 2.7 isInstance is already defined, but with swapped
# arguments.
if not inspect.isclass(expected_type):
expected_type, value = value, expected_type
if not isinstance(value, expected_type):
raise AssertionError(
"Expecting type %s, but got %s. %s" % (
expected_type, type(value), msg))
def tempPath(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet.
"""
return mk.fs.makePathInTemp(prefix=prefix, suffix=suffix)
def tempPathCleanup(self, prefix='', suffix=''):
"""
Return (path, segments) for a path which is not created yet but which
will be automatically removed.
"""
return mk.fs.pathInTemp(
cleanup=self.addCleanup, prefix=prefix, suffix=suffix)
def tempFile(self, content='', prefix='', suffix='', cleanup=True):
"""
Return (path, segments) for a new file created in temp which is
auto cleaned.
"""
segments = mk.fs.createFileInTemp(prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
if cleanup:
self.addCleanup(mk.fs.deleteFile, segments)
try:
opened_file = mk.fs.openFileForWriting(segments)
opened_file.write(content)
finally:
opened_file.close()
return (path, segments)
def tempFolder(self, name=None, prefix='', suffix=''):
"""
Create a new temp folder and return its path and segments, which is
auto cleaned.
"""
segments = mk.fs.createFolderInTemp(
foldername=name, prefix=prefix, suffix=suffix)
path = mk.fs.getRealPathFromSegments(segments)
self.addCleanup(mk.fs.deleteFolder, segments, recursive=True)
return (path, segments)
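    # Illustrative usage of the helpers above inside a test (added sketch):
    #
    #   def test_reads_config(self):
    #       path, segments = self.tempFile(content='key=value', suffix='.cfg')
    #       # ... exercise code that reads `path`; the addCleanup hooks
    #       # registered above delete the file and folder automatically.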
class FileSystemTestCase(ChevahTestCase):
"""
Common test case for all file-system tests using a real OS account.
"""
def setUp(self):
super(FileSystemTestCase, self).setUp()
# Initialized only to clean the home folder.
test_filesystem = LocalTestFilesystem(avatar=self.avatar)
test_filesystem.cleanHomeFolder()
class OSAccountFileSystemTestCase(FileSystemTestCase):
"""
Test case for tests that need a dedicated local OS account present.
"""
#: User will be created before running the test case and removed on
#: teardown.
CREATE_TEST_USER = None
| 33.049795 | 78 | 0.593053 |
0afd7a5b152406bcaea034f10b6d1b88302e3d68 | 434 | py | Python | web/snowflake.py | jphacks/C_2118 | a63279e92362e09d1856e3d44edb4793d370fd7a | ["MIT"] | null | null | null | web/snowflake.py | jphacks/C_2118 | a63279e92362e09d1856e3d44edb4793d370fd7a | ["MIT"] | 5 | 2021-10-30T00:55:45.000Z | 2021-10-30T04:23:36.000Z | web/snowflake.py | jphacks/C_2118 | a63279e92362e09d1856e3d44edb4793d370fd7a | ["MIT"] | null | null | null |
import time
| 24.111111 | 74 | 0.546083 |
0afd820091335019ca4a87a89952513413136cc0 | 69 | py | Python | src/metarl/tf/plotter/__init__.py | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d | ["MIT"] | 2 | 2020-03-15T14:35:15.000Z | 2021-02-15T16:38:00.000Z | src/metarl/tf/plotter/__init__.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | ["MIT"] | null | null | null | src/metarl/tf/plotter/__init__.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | ["MIT"] | 1 | 2020-02-24T03:04:23.000Z | 2020-02-24T03:04:23.000Z |
from metarl.tf.plotter.plotter import Plotter
__all__ = ['Plotter']
| 17.25 | 45 | 0.768116 |
0afe13064838542a197bda7a6f3924d3d020b310 | 1912 | py | Python | generative_deep_learning/build_network.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | ["MIT"] | null | null | null | generative_deep_learning/build_network.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | ["MIT"] | null | null | null | generative_deep_learning/build_network.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | ["MIT"] | null | null | null |
from keras import layers
import keras
import numpy as np

# Single-layer LSTM model for next-character prediction
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
# Model compilation configuration
optimizer = keras.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# Function to sample the next character given the model's predictions.
# NOTE: the body of this helper was not shown above; the definition below is
# an assumed reconstruction using the standard temperature-based reweighting.
def sample(preds, temperature=1.0):
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
# Text-generation loop
import sys
import random
# Trains the model for 60 epochs
for epoch in range(1, 60):
print(f'Epoch: {epoch}')
model.fit(x, y, batch_size=128, epochs=1)
# Selects a text seed at random
start_index = random.randint(0, len(text) - maxlen - 1)
generated_text = text[start_index: start_index + maxlen]
print(f'--- Generating with seed: {generated_text} ---')
# Tries a range of different sampling temperatures
for temperature in [0.2, 0.5, 1.0, 1.2]:
print(f'--- Temperature {temperature} ---')
sys.stdout.write(generated_text)
# Generates 400 characters, starting from the seed text
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
# Samples the next character
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
| 33.54386 | 69 | 0.668933 |
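The loop above re-weights the predicted character distribution with a
temperature before sampling. A standalone sketch of that effect (assumes only
numpy; the probabilities are made up):

import numpy as np

probs = np.array([0.6, 0.3, 0.1])
for t in [0.2, 0.5, 1.0, 1.2]:
    reweighted = np.exp(np.log(probs) / t)
    reweighted /= reweighted.sum()
    print(t, np.round(reweighted, 3))  # low t sharpens, high t flattens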
0afe1be38b21464d40015306f8c1387f5700680f | 4962 | py | Python | tests/test_structure_learning.py | thunderbug1/pyanom | e442bff70a4d1880a9a698c020287edf1933d498 | ["MIT"] | null | null | null | tests/test_structure_learning.py | thunderbug1/pyanom | e442bff70a4d1880a9a698c020287edf1933d498 | ["MIT"] | null | null | null | tests/test_structure_learning.py | thunderbug1/pyanom | e442bff70a4d1880a9a698c020287edf1933d498 | ["MIT"] | null | null | null |
import io
import unittest
import numpy as np
if __name__ == '__main__':
unittest.main()
| 48.174757 | 77 | 0.438734 |
0afe544e807773d996329c44f23a45f84862abbe | 2610 | py | Python | examples/MDF/states.py | 29riyasaxena/MDF | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | ["Apache-2.0"] | 12 | 2021-01-18T20:38:21.000Z | 2022-03-29T15:01:10.000Z | examples/MDF/states.py | 29riyasaxena/MDF | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | ["Apache-2.0"] | 101 | 2020-12-14T15:23:07.000Z | 2022-03-31T17:06:19.000Z | examples/MDF/states.py | 29riyasaxena/MDF | 476e6950d0f14f29463eb4f6e3be518dfb2160a5 | ["Apache-2.0"] | 15 | 2020-12-04T22:37:14.000Z | 2022-03-31T09:48:03.000Z |
"""
Example of ModECI MDF - Testing state variables
"""
from modeci_mdf.mdf import *
import sys
if __name__ == "__main__":
main()
| 25.841584 | 104 | 0.591188 |
0afefe0acef029f680c5802bbedac80261a2e2f4 | 8958 | py | Python | gremlin-python/src/main/jython/tests/driver/test_client.py | jseekamp/tinkerpop | 5f7b7d2c4353cf2d8ee48eed6c0e5632666d16c0 | ["Apache-2.0"] | 1 | 2019-06-24T09:10:32.000Z | 2019-06-24T09:10:32.000Z | gremlin-python/src/main/jython/tests/driver/test_client.py | jseekamp/tinkerpop | 5f7b7d2c4353cf2d8ee48eed6c0e5632666d16c0 | ["Apache-2.0"] | 4 | 2021-01-21T01:33:32.000Z | 2022-01-21T23:48:58.000Z | gremlin-python/src/main/jython/tests/driver/test_client.py | jseekamp/tinkerpop | 5f7b7d2c4353cf2d8ee48eed6c0e5632666d16c0 | ["Apache-2.0"] | null | null | null |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import pytest
from gremlin_python.driver.protocol import GremlinServerError
from gremlin_python.driver.client import Client
from gremlin_python.driver.request import RequestMessage
from gremlin_python.process.strategies import OptionsStrategy
from gremlin_python.process.graph_traversal import __
from gremlin_python.structure.graph import Graph
__author__ = 'David M. Brown ([email protected])'
| 36.713115 | 113 | 0.667002 |
e40074d263a071da246090065d0ad8ae39b4da28 | 20118 | py | Python | gaia_tools/xmatch/__init__.py | henrysky/gaia_tools | c151a1d8f6896d8ef5a379291baa8a1f027bd53b | ["MIT"] | 44 | 2016-09-13T06:37:46.000Z | 2022-02-03T20:59:56.000Z | gaia_tools/xmatch/__init__.py | henrysky/gaia_tools | c151a1d8f6896d8ef5a379291baa8a1f027bd53b | ["MIT"] | 24 | 2016-10-18T23:26:15.000Z | 2020-12-08T18:24:27.000Z | gaia_tools/xmatch/__init__.py | henrysky/gaia_tools | c151a1d8f6896d8ef5a379291baa8a1f027bd53b | ["MIT"] | 18 | 2016-10-18T22:26:45.000Z | 2021-08-20T09:07:31.000Z |
# Tools for cross-matching catalogs
import csv
import sys
import os
import os.path
import platform
import shutil
import subprocess
import tempfile
import warnings
WIN32= platform.system() == 'Windows'
import numpy
import astropy.coordinates as acoords
from astropy.table import Table
from astropy import units as u
from ..load.download import _ERASESTR
def xmatch(cat1,cat2,maxdist=2,
colRA1='RA',colDec1='DEC',epoch1=None,
colRA2='RA',colDec2='DEC',epoch2=None,
colpmRA2='pmra',colpmDec2='pmdec',
swap=False,
col_field=None):
"""
NAME:
xmatch
PURPOSE:
cross-match two catalogs (incl. proper motion in cat2 if epochs are different)
INPUT:
cat1 - First catalog
cat2 - Second catalog
maxdist= (2) maximum distance in arcsec
colRA1= ('RA') name of the tag in cat1 with the right ascension in degree in cat1 (assumed to be ICRS)
colDec1= ('DEC') name of the tag in cat1 with the declination in degree in cat1 (assumed to be ICRS)
epoch1= (2000.) epoch of the coordinates in cat1
colRA2= ('RA') name of the tag in cat2 with the right ascension in degree in cat2 (assumed to be ICRS)
colDec2= ('DEC') name of the tag in cat2 with the declination in degree in cat2 (assumed to be ICRS)
epoch2= (2000.) epoch of the coordinates in cat2
colpmRA2= ('pmra') name of the tag in cat2 with the proper motion in right ascension in degree in cat2 (assumed to be ICRS; includes cos(Dec)) [only used when epochs are different]
colpmDec2= ('pmdec') name of the tag in cat2 with the proper motion in declination in degree in cat2 (assumed to be ICRS) [only used when epochs are different]
       swap= (False) if False, find closest matches in cat2 for each cat1 source, if True do the opposite (important when one of the catalogs has duplicates)
col_field= (None) if None, simply cross-match on RA and Dec; if a string, then cross-match on RA and Dec with additional matching in the data tag specified by the string
OUTPUT:
(index into cat1 of matching objects,
index into cat2 of matching objects,
angular separation between matching objects)
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2019-07-07 - add additional catalog field matching - Leung (UofT)
"""
if epoch1 is None:
if 'ref_epoch' in cat1.dtype.fields:
epoch1= cat1['ref_epoch']
else:
epoch1= 2000.
if epoch2 is None:
if 'ref_epoch' in cat2.dtype.fields:
epoch2= cat2['ref_epoch']
else:
epoch2= 2000.
_check_epoch(cat1,epoch1)
_check_epoch(cat2,epoch2)
depoch= epoch2-epoch1
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat2[colpmRA2]/numpy.cos(cat2[colDec2]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat2[colpmDec2]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat2[colpmRA2])]= 0.
ddec[numpy.isnan(cat2[colpmDec2])]= 0.
else:
dra= 0.
ddec= 0.
mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(cat2[colRA2]-dra,cat2[colDec2]-ddec,
unit=(u.degree, u.degree),frame='icrs')
if col_field is not None:
try: # check if the field actually exists in both cat1/cat2
cat1[col_field]
cat2[col_field]
except KeyError: # python 2/3 format string
raise KeyError("'%s' does not exist in both catalog" % col_field)
uniques = numpy.unique(cat1[col_field])
        if swap: # initialized to -1 so that untouched indices can be noticed and filtered out at the end
d2d = numpy.ones(len(cat2)) * -1.
idx = numpy.zeros(len(cat2), dtype=int)
else:
d2d = numpy.ones(len(cat1)) * -1.
idx = numpy.zeros(len(cat1), dtype=int)
for unique in uniques: # loop over the class
idx_1 = numpy.arange(cat1[colRA1].shape[0])[cat1[col_field] == unique]
idx_2 = numpy.arange(cat2[colRA2].shape[0])[cat2[col_field] == unique]
if idx_1.shape[0] == 0 or idx_2.shape[0] == 0: # the case where a class only exists in one but not the other
continue
if swap:
temp_idx, temp_d2d, d3d = mc2[idx_2].match_to_catalog_sky(mc1[idx_1])
m1 = numpy.arange(len(cat2))
idx[cat2[col_field] == unique] = idx_1[temp_idx]
d2d[cat2[col_field] == unique] = temp_d2d
else:
temp_idx, temp_d2d, d3d = mc1[idx_1].match_to_catalog_sky(mc2[idx_2])
m1 = numpy.arange(len(cat1))
idx[cat1[col_field] == unique] = idx_2[temp_idx]
d2d[cat1[col_field] == unique] = temp_d2d
d2d = d2d * temp_d2d.unit # make sure finally we have an unit on d2d array s.t. "<" operation can complete
else:
if swap:
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
m1= numpy.arange(len(cat2))
else:
idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
m1= numpy.arange(len(cat1))
    # make sure to filter out all negative (i.e. untouched) entries
mindx= ((d2d < maxdist*u.arcsec) & (0.*u.arcsec <= d2d))
m1= m1[mindx]
m2= idx[mindx]
if swap:
return (m2,m1,d2d[mindx])
else:
return (m1,m2,d2d[mindx])
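# Illustrative use of xmatch (added sketch, mirroring the docstring above):
#
#   >>> m1, m2, sep = xmatch(cat1, cat2, maxdist=2.,
#   ...                      colRA1='RA', colDec1='DEC',
#   ...                      colRA2='ra', colDec2='dec',
#   ...                      epoch1=2000., epoch2=2015.5)
#   >>> matched1, matched2 = cat1[m1], cat2[m2]
#
# matched1 and matched2 are then aligned row-by-row, and sep holds the
# angular separation of each matched pair as an astropy quantity.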
def cds(cat,xcat='vizier:I/350/gaiaedr3',maxdist=2,colRA='RA',colDec='DEC',
selection='best',epoch=None,colpmRA='pmra',colpmDec='pmdec',
savefilename=None,gaia_all_columns=False):
"""
NAME:
cds
PURPOSE:
Cross-match against a catalog in the CDS archive using the CDS cross-matching service (http://cdsxmatch.u-strasbg.fr/xmatch); uses the curl interface
INPUT:
cat - a catalog to cross match, requires 'RA' and 'DEC' keywords (see below)
xcat= ('vizier:I/350/gaiaedr3') name of the catalog to cross-match against, in a format understood by the CDS cross-matching service (see http://cdsxmatch.u-strasbg.fr/xmatch/doc/available-tables.html; things like 'vizier:Tycho2' or 'vizier:I/345/gaia2')
maxdist= (2) maximum distance in arcsec
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
gaia_all_columns= (False) set to True if you are matching against Gaia DR2 and want *all* columns returned; this runs a query at the Gaia Archive, which may or may not work...
savefilename= (None) if set, save the output from CDS to this path; can match back using cds_matchback
OUTPUT:
(xcat entries for those that match,
indices into cat of matching sources: index[0] is cat index of xcat[0])
HISTORY:
2016-09-12 - Written based on RC catalog code - Bovy (UofT)
2016-09-21 - Account for Gaia epoch 2015 - Bovy (UofT)
2018-05-08 - Added gaia_all_columns - Bovy (UofT)
"""
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
# Write positions
posfilename= tempfile.mktemp('.csv',dir=os.getcwd())
resultfilename= tempfile.mktemp('.csv',dir=os.getcwd())
with open(posfilename,'w') as csvfile:
wr= csv.writer(csvfile,delimiter=',',quoting=csv.QUOTE_MINIMAL)
wr.writerow(['RA','DEC'])
for ii in range(len(cat)):
            wr.writerow([(cat[ii][colRA]-dra[ii]+360.) % 360.,
                         cat[ii][colDec]-ddec[ii]])
_cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat)
# Directly match on input RA
ma= cds_load(resultfilename)
if gaia_all_columns:
from astroquery.gaia import Gaia
# Write another temporary file with the XML output of the cross-match
tab= Table(numpy.array([ma['source_id'],ma['RA'],ma['DEC']]).T,
names=('source_id','RA','DEC'),
dtype=('int64','float64','float64'))
xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())
tab.write(xmlfilename,format='votable')
#get the data release....
table_identifier = xcat.split('/')[-1]
if table_identifier == 'gaia2':
table_identifier = 'gaiadr2'
try:
job= Gaia.launch_job_async(
"""select g.*, m.RA as mRA, m.DEC as mDEC
from %s.gaia_source as g
inner join tap_upload.my_table as m on m.source_id = g.source_id""" % table_identifier,
upload_resource=xmlfilename,
upload_table_name="my_table")
ma= job.get_results()
except:
print("gaia_tools.xmath.cds failed to retrieve all gaia columns, returning just the default returned by the CDS xMatch instead...")
else:
ma.rename_column('mra','RA')
ma.rename_column('mdec','DEC')
finally:
os.remove(xmlfilename)
# Remove temporary files
os.remove(posfilename)
if savefilename is None:
os.remove(resultfilename)
else:
shutil.move(resultfilename,savefilename)
# Match back to the original catalog
mai= cds_matchback(cat,ma,colRA=colRA,colDec=colDec,epoch=epoch,
colpmRA=colpmRA,colpmDec=colpmDec)
return (ma,mai)
def _cds_match_batched(resultfilename,posfilename,maxdist,selection,xcat,
nruns_necessary=1):
"""CDS xMatch (sometimes?) fails for large matches, because of a time-out,
so we recursively split until the batches are small enough to not fail"""
# Figure out which of the hierarchy we are running
try:
runs= ''.join([str(int(r)-1)
for r in posfilename.split('csv.')[-1].split('.')])
except ValueError:
runs= ''
nruns= 2**len(runs)
if nruns >= nruns_necessary:
# Only run this level's match if we don't already know that we should
# be using smaller batches
_cds_basic_match(resultfilename,posfilename,maxdist,selection,xcat)
try:
ma= cds_load(resultfilename)
except ValueError: # Assume this is the time-out failure
pass
else:
return nruns
# xMatch failed because of time-out, split
posfilename1= posfilename+'.1'
posfilename2= posfilename+'.2'
resultfilename1= resultfilename+'.1'
resultfilename2= resultfilename+'.2'
# Figure out which of the hierarchy we are running
runs= ''.join([str(int(r)-1)
for r in posfilename1.split('csv.')[-1].split('.')])
nruns= 2**len(runs)
thisrun1= 1+int(runs,2)
thisrun2= 1+int(''.join([str(int(r)-1)
for r in posfilename2.split('csv.')[-1].split('.')]),2)
# Count the number of objects
with open(posfilename,'r') as posfile:
num_lines= sum(1 for line in posfile)
# Write the header line
with open(posfilename1,'w') as posfile1:
with open(posfilename,'r') as posfile:
posfile1.write(posfile.readline())
with open(posfilename2,'w') as posfile2:
with open(posfilename,'r') as posfile:
posfile2.write(posfile.readline())
# Cut in half
cnt= 0
with open(posfilename,'r') as posfile:
with open(posfilename1,'a') as posfile1:
with open(posfilename2,'a') as posfile2:
for line in posfile:
if cnt == 0:
cnt+= 1
continue
if cnt < num_lines//2:
posfile1.write(line)
cnt+= 1 # Can stop counting once this if is done
else:
posfile2.write(line)
# Run each
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun1,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename1,posfilename1,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+"Working on CDS xMatch batch {} / {} ...\r"\
.format(thisrun2,nruns))
sys.stdout.flush()
nruns_necessary= _cds_match_batched(resultfilename2,posfilename2,
maxdist,selection,xcat,
nruns_necessary=nruns_necessary)
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
# Combine results
with open(resultfilename,'w') as resultfile:
with open(resultfilename1,'r') as resultfile1:
for line in resultfile1:
resultfile.write(line)
with open(resultfilename2,'r') as resultfile2:
for line in resultfile2:
if line[0] == 'a': continue
resultfile.write(line)
# Remove intermediate files
os.remove(posfilename1)
os.remove(posfilename2)
os.remove(resultfilename1)
os.remove(resultfilename2)
return nruns_necessary
def cds_matchback(cat,xcat,colRA='RA',colDec='DEC',selection='best',
epoch=None,colpmRA='pmra',colpmDec='pmdec',):
"""
NAME:
cds_matchback
PURPOSE:
Match a matched catalog from xmatch.cds back to the original catalog
INPUT
cat - original catalog
xcat - matched catalog returned by xmatch.cds
colRA= ('RA') name of the tag in cat with the right ascension
colDec= ('DEC') name of the tag in cat with the declination
selection= ('best') select either all matches or the best match according to CDS (see 'selection' at http://cdsxmatch.u-strasbg.fr/xmatch/doc/API-calls.html)
epoch= (2000.) epoch of the coordinates in cat
colpmRA= ('pmra') name of the tag in cat with the proper motion in right ascension in degree in cat (assumed to be ICRS; includes cos(Dec)) [only used when epoch != 2000.]
colpmDec= ('pmdec') name of the tag in cat with the proper motion in declination in degree in cat (assumed to be ICRS) [only used when epoch != 2000.]
OUTPUT:
Array indices into cat of xcat entries: index[0] is cat index of xcat[0]
HISTORY:
2016-09-12 - Written - Bovy (UofT)
2018-05-04 - Account for non-zero epoch difference - Bovy (UofT)
"""
if selection != 'all': selection= 'best'
if selection == 'all':
raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
if epoch is None:
if 'ref_epoch' in cat.dtype.fields:
epoch= cat['ref_epoch']
else:
epoch= 2000.
_check_epoch(cat,epoch)
depoch= epoch-2000.
if numpy.any(depoch != 0.):
# Use proper motion to get both catalogs at the same time
dra=cat[colpmRA]/numpy.cos(cat[colDec]/180.*numpy.pi)\
/3600000.*depoch
ddec= cat[colpmDec]/3600000.*depoch
# Don't shift objects with non-existing proper motion
dra[numpy.isnan(cat[colpmRA])]= 0.
ddec[numpy.isnan(cat[colpmDec])]= 0.
else:
dra= numpy.zeros(len(cat))
ddec= numpy.zeros(len(cat))
# xmatch to v. small diff., because match is against *original* coords,
# not matched coords in CDS
mc1= acoords.SkyCoord(cat[colRA]-dra,cat[colDec]-ddec,
unit=(u.degree, u.degree),frame='icrs')
mc2= acoords.SkyCoord(xcat['RA'],xcat['DEC'],
unit=(u.degree, u.degree),frame='icrs')
idx,d2d,d3d = mc2.match_to_catalog_sky(mc1)
mindx= d2d < 1e-5*u.arcsec
return idx[mindx]
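# Illustrative round trip (added sketch): cds() already returns the
# match-back indices, so cds_matchback() is mainly needed when re-aligning a
# result that was saved via savefilename= and reloaded later.
#
#   >>> ma, mai = cds(cat, xcat='vizier:I/350/gaiaedr3',
#   ...               colRA='RA', colDec='DEC')
#   >>> cat_matched = cat[mai]   # row-aligned with ma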
| 46.786047 | 261 | 0.607814 |
e400f6b243c2f7da007de4b3632bc30927997f62 | 14873 | py | Python | rllib/agents/dqn/dqn_torch_policy.py | ThomasLecat/ray | eb025ea8cb27583e8ef6287f5654f23d1ab270ef | ["Apache-2.0"] | null | null | null | rllib/agents/dqn/dqn_torch_policy.py | ThomasLecat/ray | eb025ea8cb27583e8ef6287f5654f23d1ab270ef | ["Apache-2.0"] | null | null | null | rllib/agents/dqn/dqn_torch_policy.py | ThomasLecat/ray | eb025ea8cb27583e8ef6287f5654f23d1ab270ef | ["Apache-2.0"] | null | null | null |
from typing import Dict, List, Tuple
import gym
import ray
from ray.rllib.agents.a3c.a3c_torch_policy import apply_grad_clipping
from ray.rllib.agents.dqn.dqn_tf_policy import (
PRIO_WEIGHTS, Q_SCOPE, Q_TARGET_SCOPE, postprocess_nstep_and_prio)
from ray.rllib.agents.dqn.dqn_torch_model import DQNTorchModel
from ray.rllib.agents.dqn.simple_q_torch_policy import TargetNetworkMixin
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_action_dist import (TorchCategorical,
TorchDistributionWrapper)
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy import LearningRateSchedule
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.exploration.parameter_noise import ParameterNoise
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import (FLOAT_MIN, huber_loss,
reduce_mean_ignore_inf,
softmax_cross_entropy_with_logits)
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
def build_q_model_and_distribution(
policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> Tuple[ModelV2, TorchDistributionWrapper]:
if not isinstance(action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(
"Action space {} is not supported for DQN.".format(action_space))
if config["hiddens"]:
# try to infer the last layer size, otherwise fall back to 256
num_outputs = ([256] + config["model"]["fcnet_hiddens"])[-1]
config["model"]["no_final_linear"] = True
else:
num_outputs = action_space.n
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm = (
isinstance(getattr(policy, "exploration", None), ParameterNoise)
or config["exploration_config"]["type"] == "ParameterNoise")
policy.q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.q_func_vars = policy.q_model.variables()
policy.target_q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="torch",
model_interface=DQNTorchModel,
name=Q_TARGET_SCOPE,
q_hiddens=config["hiddens"],
dueling=config["dueling"],
num_atoms=config["num_atoms"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=add_layer_norm)
policy.target_q_func_vars = policy.target_q_model.variables()
return policy.q_model, TorchCategorical
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
explore: bool = True,
is_training: bool = False,
**kwargs) -> Tuple[TensorType, type, List[TensorType]]:
q_vals = compute_q_values(policy, model, obs_batch, explore, is_training)
q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals
policy.q_values = q_vals
return policy.q_values, TorchCategorical, [] # state-out
def build_q_losses(policy: Policy, model, _,
train_batch: SampleBatch) -> TensorType:
config = policy.config
# Q-network evaluation.
q_t, q_logits_t, q_probs_t = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.CUR_OBS],
explore=False,
is_training=True)
# Target Q-network evaluation.
q_tp1, q_logits_tp1, q_probs_tp1 = compute_q_values(
policy,
policy.target_q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
# Q scores for actions which we know were selected in the given state.
one_hot_selection = F.one_hot(train_batch[SampleBatch.ACTIONS],
policy.action_space.n)
q_t_selected = torch.sum(
torch.where(q_t > FLOAT_MIN, q_t,
torch.tensor(0.0, device=policy.device)) *
one_hot_selection, 1)
q_logits_t_selected = torch.sum(
q_logits_t * torch.unsqueeze(one_hot_selection, -1), 1)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
q_tp1_using_online_net, q_logits_tp1_using_online_net, \
q_dist_tp1_using_online_net = compute_q_values(
policy,
policy.q_model,
train_batch[SampleBatch.NEXT_OBS],
explore=False,
is_training=True)
q_tp1_best_using_online_net = torch.argmax(q_tp1_using_online_net, 1)
q_tp1_best_one_hot_selection = F.one_hot(q_tp1_best_using_online_net,
policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
else:
q_tp1_best_one_hot_selection = F.one_hot(
torch.argmax(q_tp1, 1), policy.action_space.n)
q_tp1_best = torch.sum(
torch.where(q_tp1 > FLOAT_MIN, q_tp1,
torch.tensor(0.0, device=policy.device)) *
q_tp1_best_one_hot_selection, 1)
q_probs_tp1_best = torch.sum(
q_probs_tp1 * torch.unsqueeze(q_tp1_best_one_hot_selection, -1), 1)
policy.q_loss = QLoss(
q_t_selected, q_logits_t_selected, q_tp1_best, q_probs_tp1_best,
train_batch[PRIO_WEIGHTS], train_batch[SampleBatch.REWARDS],
train_batch[SampleBatch.DONES].float(), config["gamma"],
config["n_step"], config["num_atoms"], config["v_min"],
config["v_max"])
return policy.q_loss.loss
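# NOTE (added annotation): the QLoss helper constructed above is not shown in
# this extract. For the scalar case (num_atoms == 1) its loss is, in essence,
# the importance-weighted Huber loss on the n-step TD error -- an assumed
# sketch, not RLlib's verbatim code:
#
#   q_target = rewards + (1 - dones) * gamma**n_step * q_tp1_best
#   td_error = q_t_selected - q_target.detach()
#   loss = torch.mean(importance_weights * huber_loss(td_error))
#
# The distributional branch (num_atoms > 1) instead projects the target onto
# the support and minimizes softmax cross-entropy against q_logits_t_selected.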
def adam_optimizer(policy: Policy,
config: TrainerConfigDict) -> "torch.optim.Optimizer":
return torch.optim.Adam(
policy.q_func_vars, lr=policy.cur_lr, eps=config["adam_epsilon"])
def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
return dict({
"cur_lr": policy.cur_lr,
}, **policy.q_loss.stats)
def setup_early_mixins(policy: Policy, obs_space, action_space,
config: TrainerConfigDict) -> None:
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
def after_init(policy: Policy, obs_space: gym.Space, action_space: gym.Space,
config: TrainerConfigDict) -> None:
ComputeTDErrorMixin.__init__(policy)
TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
    # Move target net to device (this is done automatically for the
# policy.model, but not for any other models the policy has).
policy.target_q_model = policy.target_q_model.to(policy.device)
def compute_q_values(policy: Policy,
model: ModelV2,
obs: TensorType,
explore,
is_training: bool = False):
config = policy.config
model_out, state = model({
SampleBatch.CUR_OBS: obs,
"is_training": is_training,
}, [], None)
if config["num_atoms"] > 1:
(action_scores, z, support_logits_per_action, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
else:
(action_scores, logits,
probs_or_logits) = model.get_q_value_distributions(model_out)
if config["dueling"]:
state_score = model.get_state_value(model_out)
if policy.config["num_atoms"] > 1:
support_logits_per_action_mean = torch.mean(
support_logits_per_action, dim=1)
support_logits_per_action_centered = (
support_logits_per_action - torch.unsqueeze(
support_logits_per_action_mean, dim=1))
support_logits_per_action = torch.unsqueeze(
state_score, dim=1) + support_logits_per_action_centered
support_prob_per_action = nn.functional.softmax(
support_logits_per_action)
value = torch.sum(z * support_prob_per_action, dim=-1)
logits = support_logits_per_action
probs_or_logits = support_prob_per_action
else:
advantages_mean = reduce_mean_ignore_inf(action_scores, 1)
advantages_centered = action_scores - torch.unsqueeze(
advantages_mean, 1)
value = state_score + advantages_centered
else:
value = action_scores
return value, logits, probs_or_logits
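# Numeric illustration of the dueling aggregation above (added example):
# with state value V = 10 and advantages A = [1, 2, 3], mean(A) = 2, so
# Q = V + (A - mean(A)) = [9, 10, 11]; centering the advantages keeps the
# value and advantage streams identifiable.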
def grad_process_and_td_error_fn(policy: Policy,
optimizer: "torch.optim.Optimizer",
loss: TensorType) -> Dict[str, TensorType]:
# Clip grads if configured.
return apply_grad_clipping(policy, optimizer, loss)
def extra_action_out_fn(policy: Policy, input_dict, state_batches, model,
action_dist) -> Dict[str, TensorType]:
return {"q_values": policy.q_values}
DQNTorchPolicy = build_torch_policy(
name="DQNTorchPolicy",
loss_fn=build_q_losses,
get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
make_model_and_action_dist=build_q_model_and_distribution,
action_distribution_fn=get_distribution_inputs_and_class,
stats_fn=build_q_stats,
postprocess_fn=postprocess_nstep_and_prio,
optimizer_fn=adam_optimizer,
extra_grad_process_fn=grad_process_and_td_error_fn,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
extra_action_out_fn=extra_action_out_fn,
before_init=setup_early_mixins,
after_init=after_init,
mixins=[
TargetNetworkMixin,
ComputeTDErrorMixin,
LearningRateSchedule,
])
| 39.76738 | 79 | 0.638741 |
e4011ff0a2fe000023c186be9341efbe90bde007 | 57 | py | Python | formfyxer/__init__.py | SuffolkLITLab/FormFyxer | 00a6a70b30f1899fc5273de1001f1f57c3728f60 | ["MIT"] | 1 | 2022-03-07T23:22:00.000Z | 2022-03-07T23:22:00.000Z | formfyxer/__init__.py | SuffolkLITLab/FormFyxer | 00a6a70b30f1899fc5273de1001f1f57c3728f60 | ["MIT"] | 32 | 2022-02-10T17:33:58.000Z | 2022-03-23T18:27:08.000Z | formfyxer/__init__.py | SuffolkLITLab/FormFyxer | 00a6a70b30f1899fc5273de1001f1f57c3728f60 | ["MIT"] | null | null | null |
from .lit_explorer import *
from .pdf_wrangling import *
| 19 | 28 | 0.789474 |
e40169279b6d0abaccc4f8f3610827c98bbcceff | 6197 | py | Python | Overview/11 - funktsioonid.py | priidupaomets/python_kursus | 731ab386ca40c321288659db21db23912ca7f8dd | ["MIT"] | 1 | 2021-02-19T15:21:28.000Z | 2021-02-19T15:21:28.000Z | Overview/11 - funktsioonid.py | priidupaomets/python_kursus | 731ab386ca40c321288659db21db23912ca7f8dd | ["MIT"] | null | null | null | Overview/11 - funktsioonid.py | priidupaomets/python_kursus | 731ab386ca40c321288659db21db23912ca7f8dd | ["MIT"] | 1 | 2018-03-24T11:01:46.000Z | 2018-03-24T11:01:46.000Z |
"""
funktsioonid.py
Using functions and procedures
"""
#
# Procedure
#
# Call the function
minu_funktsioon()
#
# Function
#
sum = liida(3, 5)
print(sum)
# An example of using default argument values
# def funk(arg1=value1, arg2=value2):
#     pass
funk()  # Call the function without passing any arguments
#
# Finding prime numbers
#
# Call the function to test it
n = 5
if isprime(n):
    print(f"{n} IS a prime")  # We use an f-string, which lets us put the variable straight into the string
else:
    print(f"{n} is NOT a prime")
list_primes()
#
# Functions with a variable number of arguments
#
# Simply add new arguments
print(summa(1, 2, 3))  # Works
print(summa(1, 2))  # We get an error, because the new function requires 3 arguments
# Trying out function overloading (a.k.a. method overloading)
print(summa(1, 2))  # We get an error, because the last def overwrites the previous one
print(summa(1, 2, 3))
# Trying out functions with default argument values
print(summa(1, 2))
print(summa(1, 2, 3))
print(summa(1, 2, 3, 4))
#print(summa(1, 2, 3, 4, 5))  # To make this work, we would have to change the function
print(keskmine(1, 2))  # Obviously a wrong result (0.75 instead of 1.5)
print(keskmine(1, 2, 3))  # Also a wrong result (1.5 instead of 2)
print(keskmine(1, 2, 3, 4))  # Correct result
# Improve how the number of arguments is determined
print(keskmine(1, 2))  # Correct result
print(keskmine(1, 2, 3))  # Correct result
print(keskmine(1, 2, 3, 4))  # Correct result
print(keskmine(1, 2, 3, 0))  # Wrong result!
print(keskmine(1, 0, 3, 2))  # Correct result!?! How is this correct -- does the result depend on the order of the arguments?
# Use a different default value
print(keskmine(1, 2))  # Correct result
print(keskmine(1, 2, 3))  # Correct result
print(keskmine(1, 2, 3, 4))  # Correct result
print(keskmine(1, 2, 3, 0))  # Correct result!
print(keskmine(1, 0, 3, 2))  # Correct result
# Try passing the arguments as a list
#print(summa(1))  # Does not work, because the argument is not an iterable type
#print(summa(1, 2))  # Does not work, because the arguments are not a list
arvud=[1, 2]
print(summa(arvud))
arvud=[1, 2, 3]
print(summa(arvud))
arvud=[1, 2, 3, 4]
print(summa(arvud))
print(summa([1, 2, 3, 4, 5]))  # We can also pass the list without an intermediate variable
arvud=[1]
print(summa(arvud))
print(summa())  # Even this variant works
print(summa(1))
print(summa(1, 2))
arvud=[1, 2]
print(summa(*arvud))  # Here, too, '*' must be used
arvud=[1, 2, 3]
print(summa(*arvud))
arvud=[1, 2, 3, 4]
print(summa(*arvud))
arvud=[1, 2, 3, 4, 5]
print(summa(*arvud))
arvud=[1]
print(summa(*arvud))
# Different kinds of arguments
argfun(1, 2, 3, 4, 5, kw1 = 10, kw2 = 12)
argfun(kw2 = 10, kw3 = 12, kw4 = 14)
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
# How can we guarantee that the arguments are numbers?
print(numsum(1))
print(numsum(1, 2))
print(numsum(1, 2, 3))
print(numsum(1, 2, 3, "4"))
print(numsum(1, None, 3, 4, 5))
print("-"*30)
print(numcount(1))
print(numcount(1, 2))
print(numcount(1, 2, 3))
print(numcount(1, 2, 3, "4"))
print(numcount(1, None, 3, 4, 5))
print("-"*30)
print(numavg(1))
print(numavg(1, 2))
print(numavg(1, 2, 3))
print(numavg(1, 2, 3, "4"))
print(numavg(1, None, 3, 4, 5))
print(numavg())  # Error! Division by zero!!!
# We will take a closer look at error handling shortly
| 24.01938 | 116 | 0.606745 |
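The tutorial above exercises summa and keskmine without showing their
definitions (they were elided). A hedged sketch of what the final variadic
versions being demonstrated likely look like (an assumption inferred from the
call sites, not the original code):

def summa(*arvud):
    result = 0
    for arv in arvud:
        result += arv
    return result

def keskmine(*arvud):
    return summa(*arvud) / len(arvud)  # keskmine() raises ZeroDivisionError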
e401cec76e2495c504bab2f84a98dc13530872c1 | 6,865 | py | Python | tests/integration/states/test_cmd.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/integration/states/test_cmd.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/integration/states/test_cmd.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Tests for the file state
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import textwrap
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
IS_WINDOWS = salt.utils.is_windows()
| 34.154229 | 94 | 0.57276 |
e402affb74681aeffbd7073f07e5537c7f847fc0 | 2591 | py | Python | mars/tensor/execution/datastore.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | ["Apache-2.0"] | null | null | null | mars/tensor/execution/datastore.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | ["Apache-2.0"] | null | null | null | mars/tensor/execution/datastore.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import tiledb
except ImportError: # pragma: no cover
tiledb = None
from ...lib.sparse import SparseNDArray
from ...lib.sparse.core import sps
from ..expressions import datastore
from .utils import get_tiledb_ctx
| 37.550725 | 89 | 0.63296 |
e4041f8f3f0e170375ff7b152259c16fb293ef71 | 1689 | py | Python | fastgc/model/mlp.py | ppmlguy/fastgradclip | 0d8bff42ab13fa3471c520a2823050ccf0ff4a21 | ["MIT"] | 2 | 2020-10-16T10:14:25.000Z | 2021-03-25T17:19:34.000Z | fastgc/model/mlp.py | ppmlguy/fastgradclip | 0d8bff42ab13fa3471c520a2823050ccf0ff4a21 | ["MIT"] | null | null | null | fastgc/model/mlp.py | ppmlguy/fastgradclip | 0d8bff42ab13fa3471c520a2823050ccf0ff4a21 | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastgc.model.penet import PeGradNet
from fastgc.layers.linear import Linear
from fastgc.activation import activation
| 35.1875 | 102 | 0.605684 |
e40722bed82cf8f0cac95ef9146f043dd3dc25ca | 5318 | py | Python | 05-Environments/hw02/hw02/hw02.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | ["MIT"] | 2 | 2020-04-24T18:36:53.000Z | 2020-04-25T00:15:55.000Z | 05-Environments/hw02/hw02/hw02.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | ["MIT"] | null | null | null | 05-Environments/hw02/hw02/hw02.py | ericchen12377/CS61A_LearningDoc | 31f23962b0e2834795bf61eeb0f4884cc5da1809 | ["MIT"] | 1 | 2018-03-24T11:01:46.000Z | 2018-03-24T11:01:46.000Z |
""" Homework 2: Higher Order Functions"""
HW_SOURCE_FILE = 'hw02.py'
from operator import add, mul, sub
square = lambda x: x * x
identity = lambda x: x
triple = lambda x: 3 * x
increment = lambda x: x + 1
######################
# Required Questions #
######################
def product(n, f):
"""Return the product of the first n terms in a sequence.
n -- a positive integer
f -- a function that takes one argument to produce the term
>>> product(3, identity) # 1 * 2 * 3
6
>>> product(5, identity) # 1 * 2 * 3 * 4 * 5
120
>>> product(3, square) # 1^2 * 2^2 * 3^2
36
>>> product(5, square) # 1^2 * 2^2 * 3^2 * 4^2 * 5^2
14400
>>> product(3, increment) # (1+1) * (2+1) * (3+1)
24
>>> product(3, triple) # 1*3 * 2*3 * 3*3
162
"""
"*** YOUR CODE HERE ***"
result,k = 1,1
while k <= n:
result,k = f(k)*result, k + 1
return result
def accumulate(combiner, base, n, f):
"""Return the result of combining the first n terms in a sequence and base.
The terms to be combined are f(1), f(2), ..., f(n). combiner is a
two-argument commutative, associative function.
>>> accumulate(add, 0, 5, identity) # 0 + 1 + 2 + 3 + 4 + 5
15
>>> accumulate(add, 11, 5, identity) # 11 + 1 + 2 + 3 + 4 + 5
26
>>> accumulate(add, 11, 0, identity) # 11
11
>>> accumulate(add, 11, 3, square) # 11 + 1^2 + 2^2 + 3^2
25
>>> accumulate(mul, 2, 3, square) # 2 * 1^2 * 2^2 * 3^2
72
>>> accumulate(lambda x, y: x + y + 1, 2, 3, square)
19
>>> accumulate(lambda x, y: 2 * (x + y), 2, 3, square)
58
>>> accumulate(lambda x, y: (x + y) % 17, 19, 20, square)
16
"""
"*** YOUR CODE HERE ***"
result, k = base,1
while k <= n:
result, k = combiner(result,f(k)), k + 1
return result
def summation_using_accumulate(n, f):
"""Returns the sum of f(1) + ... + f(n). The implementation
uses accumulate.
>>> summation_using_accumulate(5, square)
55
>>> summation_using_accumulate(5, triple)
45
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'summation_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 0, 1
# while k <= n:
# result, k = result + f(k), k + 1
return accumulate(add,0,n,f)
def product_using_accumulate(n, f):
"""An implementation of product using accumulate.
>>> product_using_accumulate(4, square)
576
>>> product_using_accumulate(6, triple)
524880
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'product_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 1, 1
# while k <= n:
# result, k = result * f(k), k + 1
return accumulate(mul,1,n,f)
def compose1(h, g):
    """Return a function f, such that f(x) = h(g(x))."""
    def f(x):
        return h(g(x))
    return f
def make_repeater(h, n):
"""Return the function that computes the nth application of h.
>>> add_three = make_repeater(increment, 3)
>>> add_three(5)
8
>>> make_repeater(triple, 5)(1) # 3 * 3 * 3 * 3 * 3 * 1
243
>>> make_repeater(square, 2)(5) # square(square(5))
625
>>> make_repeater(square, 4)(5) # square(square(square(square(5))))
152587890625
>>> make_repeater(square, 0)(5) # Yes, it makes sense to apply the function zero times!
5
"""
"*** YOUR CODE HERE ***"
return repeater
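# Equivalent formulation (added example): make_repeater can also be expressed
# by folding compose1, composing h onto the identity function n times.
def make_repeater_via_compose(h, n):
    f = identity
    while n > 0:
        f = compose1(h, f)
        n = n - 1
    return f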
##########################
# Just for fun Questions #
##########################
def zero(f):
    """Church numeral 0 -- assumed standard definition (the docstrings below
    reference zero and successor, whose definitions were not shown here)."""
    return lambda x: x

def successor(n):
    """Return the Church numeral one greater than n (assumed standard form)."""
    return lambda f: lambda x: f(n(f)(x))

def one(f):
"""Church numeral 1: same as successor(zero)"""
"*** YOUR CODE HERE ***"
return lambda x: f(x)
def two(f):
"""Church numeral 2: same as successor(successor(zero))"""
"*** YOUR CODE HERE ***"
return lambda x: f(f(x))
three = successor(two)
def church_to_int(n):
"""Convert the Church numeral n to a Python integer.
>>> church_to_int(zero)
0
>>> church_to_int(one)
1
>>> church_to_int(two)
2
>>> church_to_int(three)
3
"""
"*** YOUR CODE HERE ***"
return n(lambda x: x + 1)(0)
def add_church(m, n):
"""Return the Church numeral for m + n, for Church numerals m and n.
>>> church_to_int(add_church(two, three))
5
"""
"*** YOUR CODE HERE ***"
return lambda f: lambda x: m(f)(n(f)(x))
def mul_church(m, n):
"""Return the Church numeral for m * n, for Church numerals m and n.
>>> four = successor(three)
>>> church_to_int(mul_church(two, three))
6
>>> church_to_int(mul_church(three, four))
12
"""
"*** YOUR CODE HERE ***"
return lambda f: m(n(f))
def pow_church(m, n):
"""Return the Church numeral m ** n, for Church numerals m and n.
>>> church_to_int(pow_church(two, three))
8
>>> church_to_int(pow_church(three, two))
9
"""
"*** YOUR CODE HERE ***"
return n(m)
| 25.690821 | 92 | 0.548326 |
e407a1b65cd96d68a622c0a025047b036e6148f4 | 21,659 | py | Python | test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py | farleyb-amazon/aws-encryption-sdk-python | 7950abd73ee333407d2dadd02ef2d57c3df464cf | [
"Apache-2.0"
] | 95 | 2018-08-20T23:10:00.000Z | 2022-02-17T02:54:32.000Z | test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py | farleyb-amazon/aws-encryption-sdk-python | 7950abd73ee333407d2dadd02ef2d57c3df464cf | [
"Apache-2.0"
] | 220 | 2018-08-01T20:56:29.000Z | 2022-03-28T18:12:35.000Z | test_vector_handlers/src/awses_test_vectors/manifests/full_message/decrypt_generation.py | farleyb-amazon/aws-encryption-sdk-python | 7950abd73ee333407d2dadd02ef2d57c3df464cf | [
"Apache-2.0"
] | 63 | 2018-08-01T19:37:33.000Z | 2022-03-20T17:14:15.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
AWS Encryption SDK Decrypt Message Generation manifest handler.
Described in AWS Crypto Tools Test Vector Framework feature #0006 AWS Encryption SDK Decrypt Message Generation.
"""
import json
import os
import uuid
from copy import copy
import attr
import six
from aws_encryption_sdk.caches.local import LocalCryptoMaterialsCache
from aws_encryption_sdk.materials_managers.base import CryptoMaterialsManager
from aws_encryption_sdk.materials_managers.caching import CachingCryptoMaterialsManager
from aws_encryption_sdk.materials_managers.default import DefaultCryptoMaterialsManager
from awses_test_vectors.internal.defaults import ENCODING
from awses_test_vectors.internal.util import (
dictionary_validator,
file_reader,
file_writer,
iterable_validator,
membership_validator,
validate_manifest_type,
)
from awses_test_vectors.manifests.full_message.decrypt import (
DecryptionMethod,
MessageDecryptionManifest,
MessageDecryptionTestResult,
MessageDecryptionTestScenario,
)
from awses_test_vectors.manifests.full_message.encrypt import MessageEncryptionTestScenario
from awses_test_vectors.manifests.keys import KeysManifest
try:
from aws_encryption_sdk.identifiers import AlgorithmSuite
except ImportError:
from aws_encryption_sdk.identifiers import Algorithm as AlgorithmSuite
from awses_test_vectors.manifests.master_key import MasterKeySpec, master_key_provider_from_master_key_specs
try: # Python 3.5.0 and 3.5.1 have incompatible typing modules
from typing import IO, Callable, Dict, Iterable, Optional # noqa pylint: disable=unused-import
from awses_test_vectors.internal.mypy_types import ( # noqa pylint: disable=unused-import
ENCRYPT_SCENARIO_SPEC,
PLAINTEXTS_SPEC,
)
except ImportError: # pragma: no cover
# We only actually need these imports when running the mypy checks
pass
SUPPORTED_VERSIONS = (2,)
class ChangeEDKProviderInfoTamperingMethod(TamperingMethod):
"""Tampering method that changes the provider info on all EDKs."""
new_provider_infos = attr.ib(validator=iterable_validator(list, six.string_types))
def __init__(self, new_provider_infos):
"""Create a new instance for a given new provider info value."""
self.new_provider_infos = new_provider_infos
# pylint: disable=R0201
def run_scenario_with_tampering(self, ciphertext_writer, generation_scenario, _plaintext_uri):
"""
Run a given scenario, tampering with the input or the result.
return: a list of (ciphertext, result) pairs.
"""
master_key_provider = generation_scenario.encryption_scenario.master_key_provider_fn()
# Use a caching CMM to avoid generating a new data key every time.
cache = LocalCryptoMaterialsCache(10)
caching_cmm = CachingCryptoMaterialsManager(
master_key_provider=master_key_provider,
cache=cache,
max_age=60.0,
max_messages_encrypted=100,
)
return [
self.run_scenario_with_new_provider_info(
ciphertext_writer, generation_scenario, caching_cmm, new_provider_info
)
for new_provider_info in self.new_provider_infos
]
def run_scenario_with_new_provider_info(
self, ciphertext_writer, generation_scenario, materials_manager, new_provider_info
):
"""Run with tampering for a specific new provider info value"""
tampering_materials_manager = ProviderInfoChangingCryptoMaterialsManager(materials_manager, new_provider_info)
ciphertext_to_decrypt = generation_scenario.encryption_scenario.run(tampering_materials_manager)
expected_result = MessageDecryptionTestResult.expect_error(
"Incorrect encrypted data key provider info: " + new_provider_info
)
return generation_scenario.decryption_test_scenario_pair(
ciphertext_writer, ciphertext_to_decrypt, expected_result
)
class ProviderInfoChangingCryptoMaterialsManager(CryptoMaterialsManager):
"""
    Custom CMM that modifies the provider info field on EDKs.
THIS IS ONLY USED TO CREATE INVALID MESSAGES and should never be used in
production!
"""
wrapped_cmm = attr.ib(validator=attr.validators.instance_of(CryptoMaterialsManager))
new_provider_info = attr.ib(validator=attr.validators.instance_of(six.string_types))
    def __init__(self, materials_manager, new_provider_info):
        """Create a new CMM that wraps the given CMM."""
self.wrapped_cmm = materials_manager
self.new_provider_info = new_provider_info
def get_encryption_materials(self, request):
"""
Request materials from the wrapped CMM, and then change the provider info
on each EDK.
"""
result = self.wrapped_cmm.get_encryption_materials(request)
for encrypted_data_key in result.encrypted_data_keys:
encrypted_data_key.key_provider.key_info = self.new_provider_info
return result
def decrypt_materials(self, request):
"""Thunks to the wrapped CMM"""
return self.wrapped_cmm.decrypt_materials(request)
BITS_PER_BYTE = 8
| 43.755556 | 120 | 0.722009 |
e409a1b65cd96d68a622c0a025047b036e6148f4 | 638 | py | Python | acceptance/test/TestStartStopFeature.py | ismacaulay/qtcwatchdog | 72f3588eef1019bac8788fa58c52722dfa7c4d28 | ["MIT"] | null | null | null | acceptance/test/TestStartStopFeature.py | ismacaulay/qtcwatchdog | 72f3588eef1019bac8788fa58c52722dfa7c4d28 | ["MIT"] | 12 | 2015-10-22T15:38:28.000Z | 2016-03-22T18:53:57.000Z | acceptance/test/TestStartStopFeature.py | ismacaulay/qtcwatchdog | 72f3588eef1019bac8788fa58c52722dfa7c4d28 | ["MIT"] | null | null | null |
from acceptance.harness.acceptance_test import WatchdogAcceptanceTest
| 26.583333 | 69 | 0.761755 |
e409e1ff47556f0c395cedaf6538d4e9082df50c | 1,243 | py | Python | neural_spline_flows/nde/transforms/transform_test.py | VincentStimper/nsf | 6bde505639ebcb67bffa227ea0021e3de235e03d | ["MIT"] | null | null | null | neural_spline_flows/nde/transforms/transform_test.py | VincentStimper/nsf | 6bde505639ebcb67bffa227ea0021e3de235e03d | ["MIT"] | null | null | null | neural_spline_flows/nde/transforms/transform_test.py | VincentStimper/nsf | 6bde505639ebcb67bffa227ea0021e3de235e03d | ["MIT"] | null | null | null |
import torch
import torchtestcase
from neural_spline_flows.nde.transforms import base
| 37.666667 | 78 | 0.693484 |
e40c283a7830ae526fea47bfe3f1719fdb809be3 | 358 | py | Python | directory-traversal/validate-file-extension-null-byte-bypass.py | brandonaltermatt/penetration-testing-scripts | 433b5d000a5573e60b9d8e49932cedce74937ebc | ["MIT"] | null | null | null | directory-traversal/validate-file-extension-null-byte-bypass.py | brandonaltermatt/penetration-testing-scripts | 433b5d000a5573e60b9d8e49932cedce74937ebc | ["MIT"] | null | null | null | directory-traversal/validate-file-extension-null-byte-bypass.py | brandonaltermatt/penetration-testing-scripts | 433b5d000a5573e60b9d8e49932cedce74937ebc | ["MIT"] | null | null | null |
"""
https://portswigger.net/web-security/file-path-traversal/lab-validate-file-extension-null-byte-bypass
"""
import sys
import requests
site = sys.argv[1]
if site.startswith('https://'):
    # str.lstrip() strips a *set of characters*, not a prefix, so the original
    # rstrip('/').lstrip('https://') could also eat leading characters of the host
    site = site[len('https://'):]
site = site.rstrip('/')
url = f'''https://{site}/image?filename=../../../etc/passwd%00.png'''
s = requests.Session()
resp = s.get(url)
print(resp.text)
| 21.058824 | 101 | 0.664804 |
e40ca767179088e9b2626907b90dc14b9802c60c | 10,237 | py | Python | atmpro1_vsm2.py | joselynzhao/One-shot-Person-Re-ID-ATM | d039b1a66410f87cfe931774eba54a5f1a1a0260 | ["MIT"] | 3 | 2020-07-28T03:16:51.000Z | 2020-11-23T05:39:54.000Z | atmpro1_vsm2.py | joselynzhao/One-shot-Person-Re-ID-ATM | d039b1a66410f87cfe931774eba54a5f1a1a0260 | ["MIT"] | null | null | null | atmpro1_vsm2.py | joselynzhao/One-shot-Person-Re-ID-ATM | d039b1a66410f87cfe931774eba54a5f1a1a0260 | ["MIT"] | null | null | null |
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/3 11:03
# @Author : Joselynzhao
# @Email : [email protected]
# @File : atmpro1_vsm2.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/9/1 7:07
# @Author : Joselynzhao
# @Email : [email protected]
# @File : atmpro1_vsm.py
# @Software: PyCharm
# @Desc :
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/8/26 8:26
# @Author : Joselynzhao
# @Email : [email protected]
# @File : atmpro1.py
# @Software: PyCharm
# @Desc :
from my_reid.eug import *
from my_reid import datasets
from my_reid import models
import numpy as np
import torch
import argparse
import os
import warnings
warnings.filterwarnings("ignore")
from my_reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from my_reid.utils.serialization import load_checkpoint
from torch import nn
import time
import pickle
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from pathlib import Path
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Progressive Learning for One-Example re-ID')
parser.add_argument('-d', '--dataset', type=str, default='mars',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-f', '--fea', type=int, default=1024)
parser.add_argument('--EF', type=int, default=10)
parser.add_argument('--t', type=float, default=2) #tagper, step.
parser.add_argument('--exp_order', type=str, default='0')
parser.add_argument('--exp_name', type=str, default='atm')
parser.add_argument('--exp_aim', type=str, default='for paper')
parser.add_argument('--run_file',type=str,default='train.py')
parser.add_argument('--log_name',type=str,default='pl_logs')
parser.add_argument('--topk',type=int,default=2)
parser.add_argument('--vsm_lambda',type=float,default=0.5)
parser.add_argument('--resume', type=str, default='Yes')
parser.add_argument('--max_frames', type=int, default=900)
parser.add_argument('--loss', type=str, default='ExLoss', choices=['CrossEntropyLoss', 'ExLoss'])
parser.add_argument('--init', type=float, default=-1)
parser.add_argument('-m', '--momentum', type=float, default=0.5)
parser.add_argument('-e', '--epochs', type=int, default=70)
parser.add_argument('-s', '--step_size', type=int, default=55)
parser.add_argument('--lamda', type=float, default=0.5)
main(parser.parse_args())
| 41.783673 | 225 | 0.65019 |
7c0e42d68dd892a292e20be61de2cca89811eb9b | 6,252 | py | Python | consumer/tests/test__index_handler.py | eHealthAfrica/aether-elasticsearch-consumer | fc29a1da8cfd7482257b1023b50a1a43372886c5 | ["Apache-2.0"] | null | null | null | consumer/tests/test__index_handler.py | eHealthAfrica/aether-elasticsearch-consumer | fc29a1da8cfd7482257b1023b50a1a43372886c5 | ["Apache-2.0"] | 8 | 2018-08-02T09:11:22.000Z | 2021-09-13T14:12:22.000Z | consumer/tests/test__index_handler.py | eHealthAfrica/aether-elasticsearch-consumer | fc29a1da8cfd7482257b1023b50a1a43372886c5 | ["Apache-2.0"] | 1 | 2019-10-29T11:29:32.000Z | 2019-10-29T11:29:32.000Z |
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pytest
import requests
import responses
from time import sleep
from elasticsearch.exceptions import NotFoundError
from aet.logger import get_logger
from app import index_handler
from . import * # noqa # fixtures
LOG = get_logger('TEST-IDX')
# convenience function for jsonpath
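# Hedged sketch: the convenience function itself is missing from this copy.
# A minimal helper of the kind the comment above announces, assuming the
# jsonpath-ng package is available; the name first_match is a placeholder.
from jsonpath_ng import parse as _jsonpath_parse
def first_match(path, obj):
    # value of the first jsonpath match in obj, or None when nothing matches
    matches = [m.value for m in _jsonpath_parse(path).find(obj)]
    return matches[0] if matches else None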
| 31.736041 | 85 | 0.679303 |
7c0e9d465eeddf2a8eeee673a92ff1e660a22216 | 57 | py | Python | plans/config.py | datopian/plans | 12bd9ff6f725703e7a73f3ad90680f5ade8cebdf | ["MIT"] | 3 | 2019-11-18T12:04:27.000Z | 2020-03-07T02:45:45.000Z | plans/config.py | datopian/plans | 12bd9ff6f725703e7a73f3ad90680f5ade8cebdf | ["MIT"] | null | null | null | plans/config.py | datopian/plans | 12bd9ff6f725703e7a73f3ad90680f5ade8cebdf | ["MIT"] | null | null | null |
import os
database_url = os.environ.get('DATABASE_URL')
| 14.25 | 45 | 0.77193 |
7c0efca532f7042e0db58c5e7fb4f25f0274261b | 3,437 | py | Python | Assignment Day 2 .py | ShubhamKahlon57/Letsupgrade-python-Batch-7 | 7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af | ["Apache-2.0"] | null | null | null | Assignment Day 2 .py | ShubhamKahlon57/Letsupgrade-python-Batch-7 | 7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af | ["Apache-2.0"] | null | null | null | Assignment Day 2 .py | ShubhamKahlon57/Letsupgrade-python-Batch-7 | 7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Lists and their functions
# In[6]:
# empty list
my_list = []
# list of integers
my_list = [1, 2, 3]
# list with mixed data types
my_list = [1, "Hello", 3.4]
# In[7]:
# nested list
my_list = ["mouse", [8, 4, 6], ['a']]
# In[11]:
# List indexing
my_list = ['p', 'r', 'o', 'b', 'e']
# Output: p
print(my_list[0])
# Output: o
print(my_list[2])
# Output: e
print(my_list[4])
# Nested List
n_list = ["Happy", [2, 0, 1, 5]]
# Nested indexing
print(n_list[0][1])
print(n_list[1][3])
# Error! Only integer can be used for indexing
# my_list[4.0]
# In[9]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[13]:
# Deleting list items
my_list = ['p', 'r', 'o', 'b', 'l', 'e', 'm']
# delete one item
del my_list[2]
print(my_list)
# delete multiple items
del my_list[1:5]
print(my_list)
# delete entire list
del my_list
# In[14]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[15]:
# Dictionaries and their functions
# In[18]:
my_dict = {}  # empty dictionary
# dictionary with integer keys
my_dict = {1: 'apple', 2: 'ball'}
# dictionary with mixed keys
my_dict = {'name': 'John', 1: [2, 4, 3]}
# using dict()
my_dict = dict({1:'apple', 2:'ball'})
# from sequence having each item as a pair
my_dict = dict([(1,'apple'), (2,'ball')])
# In[20]:
# get vs [] for retrieving elements
my_dict = {'name': 'Jack', 'age': 26}
# Output: Jack
print(my_dict['name'])
# Output: 26
print(my_dict.get('age'))
# In[21]:
# Changing and adding Dictionary Elements
my_dict = {'name': 'Jack', 'age': 26}
# update value
my_dict['age'] = 27
#Output: {'age': 27, 'name': 'Jack'}
print(my_dict)
# add item
my_dict['address'] = 'Downtown'
# Output: {'address': 'Downtown', 'age': 27, 'name': 'Jack'}
print(my_dict)
# In[22]:
# Sets and their functions
# In[23]:
my_set = {1, 2, 3}
print(my_set)
# In[24]:
my_set = {1.0, "Hello", (1, 2, 3)}
print(my_set)
# In[25]:
# set cannot have duplicates
my_set = {1, 2, 3, 4, 3, 2}
print(my_set)
# In[26]:
# Tuples and their methods
# In[27]:
# Tuple having integers
my_tuple = (1, 2, 3)
print(my_tuple)
# In[28]:
my_tuple = ("hello")  # parentheses alone do not make a tuple; this is a str
print(type(my_tuple))  # <class 'str'>; a one-element tuple needs a comma: ("hello",)
# In[30]:
# Accessing tuple elements using indexing
my_tuple = ('p','e','r','m','i','t')
print(my_tuple[0])
print(my_tuple[5])
# In[31]:
print(my_tuple[-1])
# In[32]:
print(my_tuple[-6])
# In[36]:
# Changing tuple values
my_tuple = (4, 2, 3, [6, 5])
# TypeError: 'tuple' object does not support item assignment
# my_tuple[1] = 9
# However, item of mutable element can be changed
my_tuple[3][0] = 9 # Output: (4, 2, 3, [9, 5])
print(my_tuple)
# Tuples can be reassigned
my_tuple = ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
# Output: ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
print(my_tuple)
# In[37]:
# Strings and their functions
# In[38]:
# Python string examples - all assignments are identical.
String_var = 'Python'
String_var = "Python"
String_var = """Python"""
# with Triple quotes Strings can extend to multiple lines
String_var = """ This document will help you to
explore all the concepts
of Python Strings!!! """
# Replace "document" with "tutorial" and store in another variable
substr_var = String_var.replace("document", "tutorial")
print (substr_var)
# In[ ]:
| 12.059649 | 66 | 0.607507 |
7c0f8b607ed4a4992f5429c04c93d80a3e6a70fc | 9,656 | py | Python | tests/test_api_transaction.py | preston-wagner/authorizesauce | 130ee30f500c8b5bf9a6384296ca4f5d5bb565e7 | ["MIT"] | null | null | null | tests/test_api_transaction.py | preston-wagner/authorizesauce | 130ee30f500c8b5bf9a6384296ca4f5d5bb565e7 | ["MIT"] | null | null | null | tests/test_api_transaction.py | preston-wagner/authorizesauce | 130ee30f500c8b5bf9a6384296ca4f5d5bb565e7 | ["MIT"] | 1 | 2020-06-17T15:48:46.000Z | 2020-06-17T15:48:46.000Z |
from datetime import date
from six import BytesIO, binary_type, u
from six.moves.urllib.parse import parse_qsl, urlencode
from unittest2 import TestCase
import mock
from authorizesauce.apis.transaction import PROD_URL, TEST_URL, TransactionAPI
from authorizesauce.data import Address, CreditCard
from authorizesauce.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
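# Hedged stand-in: the MockResponse helper used by the fixtures below is not
# shown in this copy; they only require a BytesIO-like object wrapping the raw
# AIM response body, so a minimal subclass is assumed here.
class MockResponse(BytesIO):
    """File-like fake of a urllib response body for these tests."""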
SUCCESS = MockResponse(
b'1;1;1;This transaction has been approved.;IKRAGJ;Y;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;P;2;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_SUCCESS = {
'cvv_response': 'P',
'authorization_code': 'IKRAGJ',
'response_code': '1',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'Y',
'response_reason_code': '1',
'response_reason_text': 'This transaction has been approved.',
'transaction_id': '2171062816',
}
ERROR = MockResponse(
b'2;1;2;This transaction has been declined.;000000;N;2171062816;;;20.00;CC'
b';auth_only;;Jeffrey;Schenck;;45 Rose Ave;Venice;CA;90291;USA;;;;;;;;;;;;'
b';;;;;375DD9293D7605E20DF0B437EE2A7B92;N;1;;;;;;;;;;;XXXX1111;Visa;;;;;;;'
b';;;;;;;;;;Y')
PARSED_ERROR = {
'cvv_response': 'N',
'authorization_code': '000000',
'response_code': '2',
'amount': '20.00',
'transaction_type': 'auth_only',
'avs_response': 'N',
'response_reason_code': '2',
'response_reason_text': 'This transaction has been declined.',
'transaction_id': '2171062816',
}
| 40.06639 | 79 | 0.629453 |
7c10a8d2f209ef6de0439f6adc19d3fc6d877d41 | 678 | py | Python | src/genie/libs/parser/iosxe/tests/ShowIpv6ProtocolsSectionRip/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/iosxe/tests/ShowIpv6ProtocolsSectionRip/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/iosxe/tests/ShowIpv6ProtocolsSectionRip/cli/equal/golden_output_2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
"vrf": {
"VRF1": {
"address_family": {
"ipv6": {
"instance": {
"rip ripng": {
"redistribute": {
"static": {"route_policy": "static-to-rip"},
"connected": {},
},
"interfaces": {
"GigabitEthernet3.200": {},
"GigabitEthernet2.200": {},
},
}
}
}
}
}
}
}
| 29.478261 | 76 | 0.227139 |
7c11512944aa360a8ca2b2179d573b01222bea5e | 2,621 | py | Python | build_json.py | sungpyocho/covid19-aichi-tools | 5170bf405f67b14179fe10838701ec5baa9d6cc1 | ["MIT"] | null | null | null | build_json.py | sungpyocho/covid19-aichi-tools | 5170bf405f67b14179fe10838701ec5baa9d6cc1 | ["MIT"] | null | null | null | build_json.py | sungpyocho/covid19-aichi-tools | 5170bf405f67b14179fe10838701ec5baa9d6cc1 | ["MIT"] | null | null | null |
import csv
import io
import json
import pandas as pd
import sys
from dateutil import tz
from datetime import datetime, date, time, timedelta
# Japan Standard Time (UTC + 09:00)
JST = tz.gettz('Asia/Tokyo')
JST_current_time = datetime.now(tz=JST).strftime('%Y/%m/%d %H:%M')
patients_list = []
patients_summary_dic = {}
# the end date (YYYY-MM-DD) is expected as the first command-line argument
args = sys.argv
with open('data/patients.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
patients_list.append(row)
patients_summary_dic.setdefault(row['date'], 0)
patients_summary_dic[row['date']] += 1
# build the list of dates from the first reported case through the end date
strdt = datetime.strptime("2020-01-26", '%Y-%m-%d')  # start of the data range
enddt = datetime.strptime(args[1], '%Y-%m-%d')  # end date from the command line
# number of days in the range, inclusive
days_num = (enddt - strdt).days + 1
datelist = []
for i in range(days_num):
datelist.append(strdt + timedelta(days = i))
patients_summary_list = []
# walking backwards, drop trailing dates that still report zero patients
foundZero = True
for date in reversed(datelist):
if (not (date.strftime('%Y-%m-%d') in patients_summary_dic)) and foundZero:
continue
else:
foundZero = False
patients_summary_dic.setdefault(date.strftime('%Y-%m-%d'), 0)
        # the original multibyte keys did not survive this copy; "日付" (date)
        # and "小計" (daily subtotal) below follow the covid19-dashboard schema
        patients_summary_list.append({
            "日付": date.strftime('%Y-%m-%d'),
            "小計": patients_summary_dic[date.strftime('%Y-%m-%d')]
        })
patients_summary_list = patients_summary_list[::-1]  # back to chronological order
# load main_summary_history.csv into a pandas DataFrame
main_summary_history_df = pd.read_csv('data/main_summary_history.csv', keep_default_na=False)
# build the inspections summary from CSV
inspections_summary_list = []
with open('data/inspections_summary.csv', 'r', encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
        # key and column names here are assumptions (the multibyte originals
        # did not survive): 検査日 = test date, 検査件数 = number of tests, 備考 = note
        inspections_summary_list.append({
            "日付": datetime.strptime(row['検査日'], '%Y/%m/%d').strftime('%Y-%m-%d'),
            "小計": int(row['検査件数']),
            "備考": row['備考']
        })
data = {
"lastUpdate": JST_current_time,
"patients": {
"date": JST_current_time,
"data": patients_list
},
"patients_summary" : {
"date": JST_current_time,
"data": patients_summary_list
},
"inspections_summary" : {
"date": JST_current_time,
"data": inspections_summary_list
},
"main_summary_history": {
"date": JST_current_time,
"data": json.loads(main_summary_history_df.to_json(orient='records', force_ascii=False))
}
}
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
print(json.dumps(data, indent=4, ensure_ascii=False))
| 28.48913 | 96 | 0.655857 |
7c1199fad1c1f92e7be3b25334e3b5e42a47fbe5 | 6,633 | py | Python | dl/models/ssd/modules/utils.py | jjjkkkjjj/pytorch.dl | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | ["MIT"] | 2 | 2021-02-06T22:40:13.000Z | 2021-03-26T09:15:34.000Z | dl/models/ssd/modules/utils.py | jjjkkkjjj/pytorch.dl | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | ["MIT"] | 8 | 2020-07-11T07:10:51.000Z | 2022-03-12T00:39:03.000Z | dl/models/ssd/modules/utils.py | jjjkkkjjj/pytorch.dl | d82aa1191c14f328c62de85e391ac6fa1b4c7ee3 | ["MIT"] | 2 | 2021-03-26T09:19:42.000Z | 2021-07-27T02:38:09.000Z |
import torch
from ....data.utils.boxes import centroids2corners, iou
def matching_strategy(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 1+4+class_labels)
:param dboxes: shape is (default boxes num, 4)
    IMPORTANT: Note that the dbox coordinates are (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
pos_indicator: Bool Tensor, shape = (batch, default box num). this represents whether each default box is object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# minus 'box number per image' and 'localization=(cx, cy, w, h)'
class_num = targets[0].shape[1] - 4
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty((batch_num, dboxes_num, 4 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_conf = target[:, :4], target[:, 4:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
# shape = (batch num, dboxes num)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
#object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
# shape = (batch num, object num)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1)# ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:] = targets_loc[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 4:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
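def _matching_strategy_demo():
    # Hedged usage sketch (not part of the original module): two ground-truth
    # boxes against three default boxes, with 2-class one-hot vectors whose
    # last column is the background flag; all numbers here are made up.
    gt = torch.tensor([[0.25, 0.25, 0.2, 0.2, 1.0, 0.0],
                       [0.75, 0.75, 0.2, 0.2, 1.0, 0.0]])  # (cx, cy, w, h) + conf
    dboxes = torch.tensor([[0.25, 0.25, 0.2, 0.2],
                           [0.75, 0.75, 0.2, 0.2],
                           [0.50, 0.50, 0.1, 0.1]])
    pos, matched = matching_strategy([gt], dboxes, batch_num=1)
    # pos: (1, 3) bool, True for the two default boxes that overlap an object;
    # matched: (1, 3, 6), with the background one-hot where pos is False.
    return pos, matched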
def matching_strategy_quads(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 4=(cx,cy,w,h)+8=(x1,y1,x2,y2,...)+class_labels)
:param dboxes: shape is (default boxes num, 4)
    IMPORTANT: Note that the dbox coordinates are (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
pos_indicator: Bool Tensor, shape = (batch, default box num). this represents whether each default box is object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# minus 'box number per image' and 'localization=(cx, cy, w, h)'
class_num = targets[0].shape[1] - 4 - 8
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty(
(batch_num, dboxes_num, 4 + 8 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_quad, targets_conf = target[:, :4], target[:, 4:12], target[:, 12:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get maximum overlap value for each default box
# shape = (batch num, dboxes num)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
# object_indices = object_indices.long() # for fancy indexing
# get maximum overlap values for each object
# shape = (batch num, object num)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1) # ensure N!=0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:12], matched_targets[b, :, 12:] = \
targets_loc[object_indices], targets_quad[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 12:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
| 41.45625 | 174 | 0.673903 |
7c120c632a3695672ca8dce5ff251b3540195c6e | 68,026 | py | Python | sandroad.py | lancelee82/bluelake | 3ac3bba191ec5e331dcf66e0a20725445585c316 | ["MIT"] | null | null | null | sandroad.py | lancelee82/bluelake | 3ac3bba191ec5e331dcf66e0a20725445585c316 | ["MIT"] | null | null | null | sandroad.py | lancelee82/bluelake | 3ac3bba191ec5e331dcf66e0a20725445585c316 | ["MIT"] | null | null | null |
"""
Flatpath, go forward forever.
http://codeincomplete.com/posts/javascript-racer/
http://www.extentofthejam.com/pseudo/
http://pixel.garoux.net/screen/game_list
Usage:
* UP/DOWN/LEFT/RIGHT
* SPACE : hide/show road map
* TAB : replay this road
* RETURN : go to a new road
TODO:
* hill road
* more road sprites
* sound
"""
import math
import random
import time
from starfish import pygm
from starfish import consts
from starfish import sptdraw
from starfish import utils
IMG_POS_BACKGROUND = {
'HILLS': { 'x': 5, 'y': 5, 'w': 1280, 'h': 480 },
'SKY': { 'x': 5, 'y': 495, 'w': 1280, 'h': 480 },
'TREES': { 'x': 5, 'y': 985, 'w': 1280, 'h': 480 },
}
IMG_POS_SPRITES = {
'PALM_TREE': { 'x': 5, 'y': 5, 'w': 215, 'h': 540 },
'BILLBOARD08': { 'x': 230, 'y': 5, 'w': 385, 'h': 265 },
'TREE1': { 'x': 625, 'y': 5, 'w': 360, 'h': 360 },
'DEAD_TREE1': { 'x': 5, 'y': 555, 'w': 135, 'h': 332 },
'BILLBOARD09': { 'x': 150, 'y': 555, 'w': 328, 'h': 282 },
'BOULDER3': { 'x': 230, 'y': 280, 'w': 320, 'h': 220 },
'COLUMN': { 'x': 995, 'y': 5, 'w': 200, 'h': 315 },
'BILLBOARD01': { 'x': 625, 'y': 375, 'w': 300, 'h': 170 },
'BILLBOARD06': { 'x': 488, 'y': 555, 'w': 298, 'h': 190 },
'BILLBOARD05': { 'x': 5, 'y': 897, 'w': 298, 'h': 190 },
'BILLBOARD07': { 'x': 313, 'y': 897, 'w': 298, 'h': 190 },
'BOULDER2': { 'x': 621, 'y': 897, 'w': 298, 'h': 140 },
'TREE2': { 'x': 1205, 'y': 5, 'w': 282, 'h': 295 },
'BILLBOARD04': { 'x': 1205, 'y': 310, 'w': 268, 'h': 170 },
'DEAD_TREE2': { 'x': 1205, 'y': 490, 'w': 150, 'h': 260 },
'BOULDER1': { 'x': 1205, 'y': 760, 'w': 168, 'h': 248 },
'BUSH1': { 'x': 5, 'y': 1097, 'w': 240, 'h': 155 },
'CACTUS': { 'x': 929, 'y': 897, 'w': 235, 'h': 118 },
'BUSH2': { 'x': 255, 'y': 1097, 'w': 232, 'h': 152 },
'BILLBOARD03': { 'x': 5, 'y': 1262, 'w': 230, 'h': 220 },
'BILLBOARD02': { 'x': 245, 'y': 1262, 'w': 215, 'h': 220 },
'STUMP': { 'x': 995, 'y': 330, 'w': 195, 'h': 140 },
'SEMI': { 'x': 1365, 'y': 490, 'w': 122, 'h': 144 },
'TRUCK': { 'x': 1365, 'y': 644, 'w': 100, 'h': 78 },
'CAR03': { 'x': 1383, 'y': 760, 'w': 88, 'h': 55 },
'CAR02': { 'x': 1383, 'y': 825, 'w': 80, 'h': 59 },
'CAR04': { 'x': 1383, 'y': 894, 'w': 80, 'h': 57 },
'CAR01': { 'x': 1205, 'y': 1018, 'w': 80, 'h': 56 },
'PLAYER_UPHILL_LEFT': { 'x': 1383, 'y': 961, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_STRAIGHT': { 'x': 1295, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_RIGHT': { 'x': 1385, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_LEFT': { 'x': 995, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_STRAIGHT': { 'x': 1085, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_RIGHT': { 'x': 995, 'y': 531, 'w': 80, 'h': 41 }
}
FP_COLOR_WHITE = '#FFFFFF'
FP_COLOR_BLACK = '#000000'
FP_COLOR_YELLOW = '#EEEE00'
FP_COLOR_BLUE = '#00EEEE'
FP_COLORS = {
'SKY': '#72D7EE',
'TREE': '#005108',
'FOG': '#005108',
'LIGHT': {'road': '#6B6B6B', 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
'DARK': {'road': '#696969', 'grass': '#009A00', 'rumble': '#BBBBBB' },
'START': {'road': FP_COLOR_WHITE, 'grass': FP_COLOR_WHITE, 'rumble': FP_COLOR_WHITE},
'FINISH': {'road': FP_COLOR_BLACK, 'grass': FP_COLOR_BLACK, 'rumble': FP_COLOR_BLACK},
'START_Y': {'road': FP_COLOR_YELLOW, 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
}
FP_ROAD = {
'LENGTH': {'NONE': 0, 'SHORT': 25, 'MEDIUM': 50, 'LONG': 100 }, # num segments
'CURVE': {'NONE': 0, 'EASY': 2, 'MEDIUM': 4, 'HARD': 6 },
'HILL': {'NONE': 0, 'LOW': 20, 'MEDIUM': 40, 'HIGH': 60 },
}
FP_ROAD_SPRTS = {
'chest': {'imgs': ['img_sprts/i_chest1.png'], 'score': 100,},
'coin1': {'imgs': ['img_sprts/i_coin1.png'], 'score': 1,},
'coin5': {'imgs': ['img_sprts/i_coin5.png'], 'score': 5,},
'coin20': {'imgs': ['img_sprts/i_coin20.png'], 'score': 20,},
'health': {'imgs': ['img_sprts/i_health.png'], 'score': 10,},
'heart': {'imgs': ['img_sprts/i_heart.png'], 'score': 50,},
'pot1': {'imgs': ['img_sprts/i_pot1.png'], 'score': -5,},
'pot2': {'imgs': ['img_sprts/i_pot2.png'], 'score': -1,},
'shell': {'imgs': ['img_sprts/p_shell.png'], 'score': -20,},
'rockd': {'imgs': ['img_sprts/rock_d2.png'], 'score': -10,},
'rockr': {'imgs': ['img_sprts/rock_r2.png'], 'score': -50,},
#'ashra_defeat': {'imgs': ['img_sprts/ashra_defeat1.png'], 'score': -100,},
#'bear': {'imgs': ['img_sprts/bear2.png'], 'score': -80,},
#'dinof': {'imgs': ['img_sprts/dinof2.png'], 'score': -50,},
'blobb': {'imgs': ['img_sprts/blobb1.png'], 'score': -50,},
'chick_fly': {'imgs': ['img_sprts/chick_fly3.png'], 'score': 70,},
'clown': {'imgs': ['img_sprts/clown1.png'], 'score': -100,},
}
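# Hedged sketch of the perspective projection these constants feed into,
# following the javascript-racer tutorial linked in the module docstring; the
# original renderer classes are not shown here, so this helper is illustrative
# only and its names are assumptions.
def project(world_x, world_y, world_z, cam_x, cam_y, cam_z,
            cam_depth, width, height, road_width):
    # translate into camera space, then scale by cam_depth / z
    x, y, z = world_x - cam_x, world_y - cam_y, world_z - cam_z
    scale = cam_depth / z if z else 0.0
    screen_x = round(width / 2 + scale * x * width / 2)
    screen_y = round(height / 2 - scale * y * height / 2)
    screen_w = round(scale * road_width * width / 2)
    return screen_x, screen_y, screen_w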
def main():
#sf = GMFlatpath('flatpath <:::>', 640, 480)
sf = GMFlatpath('flatpath <:::>', 640, 480, road_file='sr_road.txt')
sf.mainloop()
if __name__ == '__main__':
main()
| 28.824576 | 100 | 0.467263 |
7c12ff613b7b049edec918f0aa7806f03a342762 | 9,197 | py | Python | First_course/test5_base.py | laetrid/learning | b28312c34db2118fb7d5691834b8f7e628117642 | ["Apache-2.0"] | null | null | null | First_course/test5_base.py | laetrid/learning | b28312c34db2118fb7d5691834b8f7e628117642 | ["Apache-2.0"] | null | null | null | First_course/test5_base.py | laetrid/learning | b28312c34db2118fb7d5691834b8f7e628117642 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
sw1_show_cdp_neighbors = '''
SW1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater, P - Phone
Device ID Local Intrfce Holdtme Capability Platform Port ID
R1 Fas 0/11 153 R S I 881 Fas 1
R2 Fas 0/12 123 R S I 881 Fas 1
R3 Fas 0/13 129 R S I 881 Fas 1
R4 Fas 0/14 173 R S I 881 Fas 1
R5 Fas 0/15 144 R S I 881 Fas 1
'''
sw1_show_cdp_neighbors_detail = '''
SW1> show cdp neighbors detail
--------------------------
Device ID: R1
Entry address(es):
IP address: 10.1.1.1
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/11, Port ID (outgoing port): FastEthernet1
Holdtime: 153 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R2
Entry address(es):
IP address: 10.1.1.2
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/12, Port ID (outgoing port): FastEthernet1
Holdtime: 123 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R3
Entry address(es):
IP address: 10.1.1.3
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/13, Port ID (outgoing port): FastEthernet1
Holdtime: 129 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R4
Entry address(es):
IP address: 10.1.1.4
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/14, Port ID (outgoing port): FastEthernet1
Holdtime: 173 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
--------------------------
Device ID: R5
Entry address(es):
IP address: 10.1.1.5
Platform: Cisco 881, Capabilities: Router Switch IGMP
Interface: FastEthernet0/15, Port ID (outgoing port): FastEthernet1
Holdtime: 144 sec
Version :
Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)
Technical Support: http://www.cisco.com/techsupport
Copyright (c) 1986-2010 by Cisco Systems, Inc.
Compiled Fri 29-Oct-10 00:02 by prod_rel_team
advertisement version: 2
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
Management address(es):
'''
r1_show_cdp_neighbors = '''
R1>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/11
'''
r1_show_cdp_neighbors_detail = '''
R1>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/11
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r2_show_cdp_neighbors = '''
R2>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/12
'''
r2_show_cdp_neighbors_detail = '''
R2>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/12
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r3_show_cdp_neighbors = '''
R3>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/13
'''
r3_show_cdp_neighbors_detail = '''
R3>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/13
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r4_show_cdp_neighbors = '''
R4>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/14
'''
r4_show_cdp_neighbors_detail = '''
R4>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/14
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
r5_show_cdp_neighbors = '''
R5>show cdp neighbors
Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge
S - Switch, H - Host, I - IGMP, r - Repeater
Device ID Local Intrfce Holdtme Capability Platform Port ID
SW1 Fas 1 150 S I WS-C2950- Fas 0/15
'''
r5_show_cdp_neighbors_detail = '''
R5>show cdp neighbors detail
-------------------------
Device ID: SW1
Entry address(es):
IP address: 10.1.1.22
Platform: cisco WS-C2950-24, Capabilities: Switch IGMP
Interface: FastEthernet1, Port ID (outgoing port): FastEthernet0/15
Holdtime : 145 sec
Version :
Cisco Internetwork Operating System Software
IOS (tm) C2950 Software (C2950-I6Q4L2-M), Version 12.1(22)EA8a, RELEASE SOFTWARE (fc1)
Copyright (c) 1986-2006 by cisco Systems, Inc.
Compiled Fri 28-Jul-06 15:16 by weiliu
advertisement version: 2
Protocol Hello: OUI=0x00000C, Protocol ID=0x0112; payload len=27, value=00000000FFFFFFFF010221FF0000000000000019E845CE80FF0000
VTP Management Domain: ''
Native VLAN: 1
Duplex: full
'''
| 37.084677 | 127 | 0.677938 |
7c138f84c229bf0a17e877706fc36f489907d8bf | 23,732 | py | Python | scipy/optimize/_numdiff.py | jeremiedbb/scipy | 2bea64c334b18fd445a7945b350d7ace2dc22913 | ["BSD-3-Clause"] | 1 | 2019-12-19T16:51:27.000Z | 2019-12-19T16:51:27.000Z | scipy/optimize/_numdiff.py | jeremiedbb/scipy | 2bea64c334b18fd445a7945b350d7ace2dc22913 | ["BSD-3-Clause"] | null | null | null | scipy/optimize/_numdiff.py | jeremiedbb/scipy | 2bea64c334b18fd445a7945b350d7ace2dc22913 | ["BSD-3-Clause"] | null | null | null |
"""Routines for numerical differentiation."""
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
        Adjusted step sizes. A step size decreases only when neither a sign
        flip nor switching to a one-sided scheme allows a full step to be taken.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
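# The two helpers below did not survive this copy; they are reconstructed to
# match the behaviour documented elsewhere in the module (absolute step
# h = rel_step * sign(x0) * max(1, |x0|), and bounds broadcast to x0's shape).
def _compute_absolute_step(rel_step, x0, method):
    if rel_step is None:
        rel_step = relative_step[method]
    sign_x0 = (x0 >= 0).astype(float) * 2 - 1
    return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
def _prepare_bounds(bounds, x0):
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)
    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)
    return lb, ub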
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if in each row at least one of them
has zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of columns enumeration.
If int or None, a random permutation is used with `order` used as
a random seed. Default is 0, that is use a random permutation but
guarantee repeatability.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group to
        which the i-th column is assigned. The procedure is helpful only if
        n_groups is significantly less than n.
References
----------
.. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
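# Hedged usage sketch: for a tridiagonal sparsity pattern, three groups always
# suffice, independent of n (the exact labels depend on the random order):
#
#     A = np.eye(6) + np.eye(6, k=1) + np.eye(6, k=-1)
#     groups = group_columns(A)
#     len(set(groups))   # -> 3, so only 3 extra evaluations per Jacobian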
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
called the Jacobian, where an element (i, j) is a partial derivative of
f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first order accuracy forward or backward
difference.
- '3-point' - use central difference in interior points and the
second order accuracy forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise,
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None it is assumed to be equal to ``fun(x0)``, in this case
the ``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only few non-zero elements in each row, then
it's possible to estimate its several columns by a single function
evaluation [3]_. To perform such economic computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
element means that a corresponding element of the Jacobian
identically equals to zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note, that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True returns a LinearOperator
with shape (m, n). Otherwise it returns a dense array or sparse
matrix depending on how `sparsity` is defined. If `sparsity`
is None then a ndarray with shape (m, n) is returned. If
`sparsity` is not None returns a csr_matrix with shape (m, n).
For sparse matrices and linear operators it is always returned as
a 2-D structure, for ndarrays, if m=1 it is returned
as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
If `rel_step` is not provided, it assigned to ``EPS**(1/s)``, where EPS is
machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
'3-point' method. Such relative step approximately minimizes a sum of
truncation and round-off errors, see [1]_.
A finite difference scheme for '3-point' method is selected automatically.
The well-known central difference scheme is used for points sufficiently
far from the boundary, and 3-point forward or backward scheme is used for
points near the boundary. Both schemes have the second-order accuracy in
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
forward and backward difference schemes.
For dense differencing when m=1 Jacobian is returned with a shape (n,),
on the other hand when n=1 Jacobian is returned with a shape (m, 1).
Our motivation is the following: a) It handles a case of gradient
computation (m=1) in a conventional way. b) It clearly separates these two
    different cases. c) In all cases np.atleast_2d can be called to get 2-D
Jacobian with correct dimensions.
References
----------
.. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] B. Fornberg, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
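    # Reconstructed nested helper (its definition is missing from this copy):
    # wrap the user function so it always returns a 1-D array, as documented.
    def fun_wrapped(x):
        f = np.atleast_1d(fun(x, *args, **kwargs))
        if f.ndim > 1:
            raise RuntimeError("`fun` return value has more than 1 dimension.")
        return f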
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
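def _sparse_jacobian_demo():
    # Hedged sketch (not part of scipy): pairing `group_columns` with the
    # `sparsity` argument so a banded Jacobian costs O(bandwidth), not O(n),
    # extra function evaluations; names and numbers here are illustrative.
    n = 8
    def f(x):
        y = x ** 2
        y[1:] += x[:-1]      # output i depends on x[i] and x[i - 1]
        return y
    structure = np.eye(n) + np.eye(n, k=-1)
    groups = group_columns(structure)
    x0 = np.arange(1.0, n + 1)
    return approx_derivative(f, x0, sparsity=(structure, groups))  # csr_matrix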
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
less or equal than 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
| 37.08125 | 79 | 0.583727 |
7c147e3dd10a5e110c033ad9ba1df174aabe3c39 | 20,303 | py | Python | tests/models/test_hparams.py | abhinavg97/pytorch-lightning | 0d54cf25a2dba33e4640ac52768a83406e7a0a94 | ["Apache-2.0"] | 1 | 2020-10-26T09:02:08.000Z | 2020-10-26T09:02:08.000Z | tests/models/test_hparams.py | vivektalwar13071999/pytorch-lightning | 7c4f80a1afe3d7b0f1e9ee834aacaf8439195cdf | ["Apache-2.0"] | null | null | null | tests/models/test_hparams.py | vivektalwar13071999/pytorch-lightning | 7c4f80a1afe3d7b0f1e9ee834aacaf8439195cdf | ["Apache-2.0"] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from argparse import Namespace
import cloudpickle
import pytest
import torch
from fsspec.implementations.local import LocalFileSystem
from omegaconf import OmegaConf, Container
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.core.saving import save_hparams_to_yaml, load_hparams_from_yaml
from pytorch_lightning.utilities import AttributeDict, is_picklable
from tests.base import EvalModelTemplate, TrialMNIST, BoringModel
# -------------------------
# STANDARD TESTS
# -------------------------
def _run_standard_hparams_test(tmpdir, model, cls, try_overwrite=False):
"""
Tests for the existence of an arg 'test_arg=14'
"""
hparam_type = type(model.hparams)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, overfit_batches=2)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model2 = cls.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.test_arg == 14
assert isinstance(model2.hparams, hparam_type)
if try_overwrite:
# verify that we can overwrite the property
model3 = cls.load_from_checkpoint(raw_checkpoint_path, test_arg=78)
assert model3.hparams.test_arg == 78
return raw_checkpoint_path
def test_explicit_args_hparams(tmpdir):
"""
Tests that a model can take implicit args and assign
"""
# define model
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_implicit_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
model = LocalModel(test_arg=14, test_arg2=90)
# run standard test suite
raw_checkpoint_path = _run_standard_hparams_test(tmpdir, model, LocalModel)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=120)
# config specific tests
assert model.hparams.test_arg2 == 120
def test_explicit_missing_args_hparams(tmpdir):
"""
Tests that a model can take regular args and assign
"""
# define model
model = LocalModel(test_arg=14, test_arg2=90)
# test proper property assignments
assert model.hparams.test_arg == 14
# verify we can train
trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path)
assert LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in raw_checkpoint
assert raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]['test_arg'] == 14
# verify that model loads correctly
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, test_arg2=123)
assert model.hparams.test_arg == 14
assert 'test_arg2' not in model.hparams # test_arg2 is not registered in class init
return raw_checkpoint_path
# -------------------------
# SPECIFIC TESTS
# -------------------------
def _raw_checkpoint_path(trainer) -> str:
raw_checkpoint_paths = os.listdir(trainer.checkpoint_callback.dirpath)
raw_checkpoint_paths = [x for x in raw_checkpoint_paths if '.ckpt' in x]
assert raw_checkpoint_paths
raw_checkpoint_path = raw_checkpoint_paths[0]
raw_checkpoint_path = os.path.join(trainer.checkpoint_callback.dirpath, raw_checkpoint_path)
return raw_checkpoint_path
# @pytest.mark.parametrize("cls,config", [
# (SaveHparamsModel, Namespace(my_arg=42)),
# (SaveHparamsModel, dict(my_arg=42)),
# (SaveHparamsModel, OmegaConf.create(dict(my_arg=42))),
# (AssignHparamsModel, Namespace(my_arg=42)),
# (AssignHparamsModel, dict(my_arg=42)),
# (AssignHparamsModel, OmegaConf.create(dict(my_arg=42))),
# ])
# def test_single_config_models(tmpdir, cls, config):
# """ Test that the model automatically saves the arguments passed into the constructor """
# model = cls(config)
#
# # no matter how you do it, it should be assigned
# assert model.hparams.my_arg == 42
#
# # verify that the checkpoint saved the correct values
# trainer = Trainer(default_root_dir=tmpdir, max_epochs=2, overfit_batches=0.5)
# trainer.fit(model)
#
# # verify that model loads correctly
# raw_checkpoint_path = _raw_checkpoint_path(trainer)
# model = cls.load_from_checkpoint(raw_checkpoint_path)
# assert model.hparams.my_arg == 42
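# Editor's note: the two model classes exercised by the commented-out test
# above were dropped in extraction. Minimal sketches of the usual pattern
# (assumptions, not the verbatim originals):
class SaveHparamsModel(BoringModel):
    """Takes a single config object and registers it via save_hyperparameters."""
    def __init__(self, hparams):
        super().__init__()
        self.save_hyperparameters(hparams)
class AssignHparamsModel(BoringModel):
    """Assigns the config object directly to self.hparams."""
    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams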
class AnotherArgModel(EvalModelTemplate):
def __init__(self, arg1):
super().__init__()
self.save_hyperparameters(arg1)
class OtherArgsModel(EvalModelTemplate):
def test_hparams_pickle(tmpdir):
ad = AttributeDict({'key1': 1, 'key2': 'abc'})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
class UnpickleableArgsEvalModel(EvalModelTemplate):
""" A model that has an attribute that cannot be pickled. """
def test_hparams_pickle_warning(tmpdir):
model = UnpickleableArgsEvalModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
with pytest.warns(UserWarning, match="attribute 'pickle_me' removed from hparams because it cannot be pickled"):
trainer.fit(model)
assert 'pickle_me' not in model.hparams
def test_hparams_save_yaml(tmpdir):
hparams = dict(batch_size=32, learning_rate=0.001, data_root='./any/path/here',
nasted=dict(any_num=123, anystr='abcd'))
path_yaml = os.path.join(tmpdir, 'testing-hparams.yaml')
save_hparams_to_yaml(path_yaml, hparams)
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, Namespace(**hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, AttributeDict(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
save_hparams_to_yaml(path_yaml, OmegaConf.create(hparams))
assert load_hparams_from_yaml(path_yaml) == hparams
class NoArgsSubClassEvalModel(EvalModelTemplate):
class SimpleNoArgsModel(LightningModule):
def test_model_ignores_non_exist_kwargument(tmpdir):
"""Test that the model takes only valid class arguments."""
model = LocalModel()
assert model.hparams.batch_size == 15
# verify that the checkpoint saved the correct values
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
# verify that we can overwrite whatever we want
raw_checkpoint_path = _raw_checkpoint_path(trainer)
model = LocalModel.load_from_checkpoint(raw_checkpoint_path, non_exist_kwarg=99)
assert 'non_exist_kwarg' not in model.hparams
def test_args(tmpdir):
""" Test for inheritance: super class takes positional arg, subclass takes varargs. """
hparams = dict(test=1)
model = SubClassVarArgs(hparams)
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
trainer.fit(model)
raw_checkpoint_path = _raw_checkpoint_path(trainer)
with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'test'"):
SubClassVarArgs.load_from_checkpoint(raw_checkpoint_path)
def test_model_with_fsspec_as_parameter(tmpdir):
model = UnsafeParamModel(LocalFileSystem(tmpdir))
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=1,
)
trainer.fit(model)
trainer.test()
| 33.174837 | 116 | 0.713441 |
7c149f4f2e879ee66f71bed92f16a685a097e92b | 20,142 | py | Python | tests/space_test.py | hadrianmontes/jax-md | cea1cc6b22db6044a502eeeab4bddde35ac15d94 | [
"ECL-2.0",
"Apache-2.0"
] | 713 | 2019-05-14T19:02:00.000Z | 2022-03-31T17:42:23.000Z | tests/space_test.py | hadrianmontes/jax-md | cea1cc6b22db6044a502eeeab4bddde35ac15d94 | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2019-05-15T13:27:09.000Z | 2022-03-17T16:15:59.000Z | tests/space_test.py | hadrianmontes/jax-md | cea1cc6b22db6044a502eeeab4bddde35ac15d94 | [
"ECL-2.0",
"Apache-2.0"
] | 117 | 2019-05-17T13:23:37.000Z | 2022-03-18T10:32:29.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_md.space."""
from absl.testing import absltest
from absl.testing import parameterized
from jax.config import config as jax_config
from jax import random
import jax.numpy as jnp
from jax import grad, jit, jacfwd
from jax import test_util as jtu
from jax_md import space, test_util, quantity, energy
from jax_md.util import *
from functools import partial
from unittest import SkipTest
test_util.update_test_tolerance(5e-5, 5e-13)
jax_config.parse_flags_with_absl()
jax_config.enable_omnistaging()
FLAGS = jax_config.FLAGS
PARTICLE_COUNT = 10
STOCHASTIC_SAMPLES = 10
SHIFT_STEPS = 10
SPATIAL_DIMENSION = [2, 3]
BOX_FORMATS = ['scalar', 'vector', 'matrix']
if FLAGS.jax_enable_x64:
POSITION_DTYPE = [f32, f64]
else:
POSITION_DTYPE = [f32]
# pylint: disable=invalid-name
if __name__ == '__main__':
absltest.main()
| 35.52381 | 88 | 0.666369 |
7c14cbf83bd9f7d5d27ebfe3490cc6f31c415451 | 246 | py | Python | functions/batch-custom-action/status-api/lambda.py | TrollPursePublishing/trollpurse-trollops | 27e54cfd1ba1eed27097e2e3038dfab56691cf49 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | 2 | 2020-11-18T06:04:27.000Z | 2021-04-22T12:38:15.000Z | functions/batch-custom-action/status-api/lambda.py | TrollPursePublishing/trollpurse-ops | 27e54cfd1ba1eed27097e2e3038dfab56691cf49 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | functions/batch-custom-action/status-api/lambda.py | TrollPursePublishing/trollpurse-ops | 27e54cfd1ba1eed27097e2e3038dfab56691cf49 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | import boto3
batch_client = boto3.client('batch')
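# Editor's sketch: the handler body was lost in extraction. A minimal
# status-API handler consistent with the import above (event keys assumed):
def handler(event, _context):
    job_id = event["jobId"]
    jobs = batch_client.describe_jobs(jobs=[job_id])["jobs"]
    return {"status": jobs[0]["status"] if jobs else "UNKNOWN"}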
| 22.363636 | 65 | 0.678862 |
7c154bd7941e6664ea91468d29e01f725ad32c14 | 2,914 | py | Python | app/auth/views.py | ifaraag/app | d952f0dc58fd703074c19ed3235c1520119baf5f | [
"MIT"
] | null | null | null | app/auth/views.py | ifaraag/app | d952f0dc58fd703074c19ed3235c1520119baf5f | [
"MIT"
] | null | null | null | app/auth/views.py | ifaraag/app | d952f0dc58fd703074c19ed3235c1520119baf5f | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, redirect, url_for, request, flash
from flask.ext.login import login_required, login_user, logout_user
from werkzeug import check_password_hash, generate_password_hash
from app import db, login_manager, pubnub, app, _callback
from .models import User
from .forms import LoginForm, SignupForm
mod_auth = Blueprint('auth', __name__)
# @mod_auth.route('/googlelogin', methods=['GET', 'POST'])
def callback(message, channel):
db.data.insert_one(message)
def error(message):
db.data.insert_one(message)
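# Editor's sketch: the original route handlers were lost in extraction. The
# login view below is an assumption reconstructed only from the imports above
# (LoginForm, check_password_hash, login_user); the user lookup and the
# redirect endpoint are hypothetical names, not the original code.
@mod_auth.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        user = User.query.filter_by(username=form.username.data).first()  # hypothetical lookup
        if user and check_password_hash(user.password, form.password.data):
            login_user(user)
            flash('Welcome back')
            return redirect(url_for('home.index'))  # hypothetical endpoint
        flash('Wrong username or password', 'error-message')
    return render_template('auth/login.html', form=form)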
| 37.358974 | 133 | 0.630062 |
7c159cac6567c00ed5a82a064ec8c65b30f68447 | 1,595 | py | Python | economist/migrations/0003_auto_20170406_1402.py | xingjianpan/news_reader_backend | c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9 | [
"MIT"
] | 1 | 2017-11-01T02:12:24.000Z | 2017-11-01T02:12:24.000Z | economist/migrations/0003_auto_20170406_1402.py | xingjianpan/news_reader_backend | c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9 | [
"MIT"
] | null | null | null | economist/migrations/0003_auto_20170406_1402.py | xingjianpan/news_reader_backend | c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 06:02
from __future__ import unicode_literals
from django.db import migrations, models
| 28.482143 | 58 | 0.552978 |
7c16097e2ba8634058cfc608cf9a3d535fa94016 | 2,051 | py | Python | test/test_ethereum.py | coinplus-sa/coinplus-solo | e4f385a3d9eb7b72e14e397761fd9a113938917a | [
"MIT"
] | 1 | 2018-08-21T06:28:36.000Z | 2018-08-21T06:28:36.000Z | test/test_ethereum.py | coinplus-sa/coinplus-solo | e4f385a3d9eb7b72e14e397761fd9a113938917a | [
"MIT"
] | 1 | 2019-05-30T06:23:41.000Z | 2019-09-03T09:49:06.000Z | test/test_ethereum.py | coinplus-sa/coinplus-solo | e4f385a3d9eb7b72e14e397761fd9a113938917a | [
"MIT"
] | 1 | 2021-06-30T12:36:25.000Z | 2021-06-30T12:36:25.000Z | import unittest
from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum
| 85.458333 | 149 | 0.741589 |
7c170adc77db7c06c4c5968ae2d5e3df343748b4 | 776 | py | Python | python97/chapter05/list_gen.py | youaresherlock/PythonPractice | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | [
"Apache-2.0"
] | null | null | null | python97/chapter05/list_gen.py | youaresherlock/PythonPractice | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | [
"Apache-2.0"
] | null | null | null | python97/chapter05/list_gen.py | youaresherlock/PythonPractice | 2e22d3fdcb26353cb0d8215c150e84d11bc9a022 | [
"Apache-2.0"
] | 1 | 2019-11-05T01:10:15.000Z | 2019-11-05T01:10:15.000Z | #!usr/bin/python
# -*- coding:utf8 -*-
# List comprehensions, generator expressions, dict and set comprehensions
# 1. Collect the odd numbers from 1 to 20
# odd_list = []
# for i in range(21):
# if i % 2 == 1:
# odd_list.append(i)
# odd_list = [i for i in range(21) if i % 2 == 1]
# print(odd_list)
# 2. Apply a function to each item inside the comprehension
def handle_item(item):  # assumed helper; the original definition was lost in extraction
    return item * item
odd_list = [handle_item(i) for i in range(21) if i % 2 == 1]
print(odd_list)
# Generator expression: same filter, evaluated lazily
odd_gen = (i for i in range(21) if i % 2 == 1)
print(type(odd_gen))
for item in odd_gen:
print(item)
# Dict comprehension: swap keys and values
my_dict = {"bobby1": 22, "bobby2": 23, "imooc.com": 5}
reversed_dict = {value:key for key, value in my_dict.items()}
print(reversed_dict)
# Set comprehension
my_set = set(my_dict.keys())
my_set = {key for key, value in my_dict.items()}
print(type(my_set))
| 15.836735 | 61 | 0.627577 |
7c17743faf77b54c0516f30699a3b1dc9b050a25 | 11,409 | py | Python | src/streamlink/plugin/plugin.py | isqad/streamlink | f6708f1d38d056177ac3d614ebbb740d956d46f0 | [
"BSD-2-Clause"
] | 1 | 2017-11-26T18:48:29.000Z | 2017-11-26T18:48:29.000Z | src/streamlink/plugin/plugin.py | isqad/streamlink | f6708f1d38d056177ac3d614ebbb740d956d46f0 | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugin/plugin.py | isqad/streamlink | f6708f1d38d056177ac3d614ebbb740d956d46f0 | [
"BSD-2-Clause"
] | 1 | 2021-06-03T23:08:48.000Z | 2021-06-03T23:08:48.000Z | import ast
import operator
import re
from collections import OrderedDict
from functools import partial
from ..cache import Cache
from ..exceptions import PluginError, NoStreamsError
from ..options import Options
# FIXME: This is a crude attempt at making a bitrate's
# weight end up similar to the weight of a resolution.
# Someone who knows math, please fix.
BIT_RATE_WEIGHT_RATIO = 2.8
ALT_WEIGHT_MOD = 0.01
QUALITY_WEIGTHS_EXTRA = {
"other": {
"live": 1080,
},
"tv": {
"hd": 1080,
"sd": 576,
},
"quality": {
"ehq": 720,
"hq": 576,
"sq": 360,
},
}
FILTER_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
}
PARAMS_REGEX = r"(\w+)=({.+?}|\[.+?\]|\(.+?\)|'(?:[^'\\]|\\')*'|\"(?:[^\"\\]|\\\")*\"|\S+)"
HIGH_PRIORITY = 30
NORMAL_PRIORITY = 20
LOW_PRIORITY = 10
NO_PRIORITY = 0
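# Editor's illustration (not in the original module): PARAMS_REGEX captures
# key=value pairs whose value may be a dict/list/tuple literal, a quoted
# string, or a bare token:
# >>> dict(re.findall(PARAMS_REGEX, "quality=720p extra={'a': 1}"))
# {'quality': '720p', 'extra': "{'a': 1}"}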
def get_streams(self, *args, **kwargs):
"""Deprecated since version 1.9.0.
Has been renamed to :func:`Plugin.streams`, this is an alias
for backwards compatibility.
"""
return self.streams(*args, **kwargs)
__all__ = ["Plugin"]
| 29.104592 | 101 | 0.573582 |
7c18032075b4197ee9055f4f541529df445b2854 | 998 | py | Python | tests/cli/conftest.py | Aahbree/reference-data-repository | f318c0532aaf941ec4f00c8375c9dea45c56f186 | [
"MIT"
] | null | null | null | tests/cli/conftest.py | Aahbree/reference-data-repository | f318c0532aaf941ec4f00c8375c9dea45c56f186 | [
"MIT"
] | 5 | 2021-01-27T22:17:19.000Z | 2021-12-14T17:13:58.000Z | tests/cli/conftest.py | Aahbree/reference-data-repository | f318c0532aaf941ec4f00c8375c9dea45c56f186 | [
"MIT"
] | 5 | 2021-12-08T02:33:44.000Z | 2021-12-13T03:21:51.000Z | # This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Fixtures for testing the command-line interface."""
import os
import pytest
from click.testing import CliRunner
from refdata.db import DB
import refdata.config as config
| 30.242424 | 78 | 0.733467 |
7c1898e479d14fbe657ed1376514f87c04d2b942 | 2,971 | py | Python | swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py | lhoestq/DeDLOC | 36f5a6d043c3d727f9d098a35fba94aa351a5cd4 | [
"Apache-2.0"
] | null | null | null | swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py | lhoestq/DeDLOC | 36f5a6d043c3d727f9d098a35fba94aa351a5cd4 | [
"Apache-2.0"
] | null | null | null | swav/vissl/vissl/data/ssl_transforms/img_patches_tensor.py | lhoestq/DeDLOC | 36f5a6d043c3d727f9d098a35fba94aa351a5cd4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import math
from typing import Any, Dict
import numpy as np
from classy_vision.dataset.transforms import register_transform
from classy_vision.dataset.transforms.classy_transform import ClassyTransform
| 37.607595 | 88 | 0.623023 |
7c1a4912119b5eeaa02dc5d6942de0df8f969733 | 1,783 | py | Python | python/jittor/utils/publish.py | Jittor/Jittor | bc945bae94bded917214b0afe12be6bf5b919dbe | [
"Apache-2.0"
] | 4 | 2020-01-12T13:16:16.000Z | 2020-01-12T15:43:54.000Z | python/jittor/utils/publish.py | Jittor/Jittor | bc945bae94bded917214b0afe12be6bf5b919dbe | [
"Apache-2.0"
] | null | null | null | python/jittor/utils/publish.py | Jittor/Jittor | bc945bae94bded917214b0afe12be6bf5b919dbe | [
"Apache-2.0"
] | 1 | 2020-01-12T13:17:17.000Z | 2020-01-12T13:17:17.000Z | #!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# Dun Liang <[email protected]>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Publish steps:
# 1. build,push,upload docker image[jittor/jittor]
# 2. build,push,upload docker image[jittor/jittor-cuda]
# upload to pip:
# rm -rf dist && python3.7 ./setup.py sdist && python3.7 -m twine upload dist/*
import os
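# Editor's sketch: run_cmd() and docker_task() are called below but their
# definitions were lost in extraction; minimal stand-ins consistent with the
# call sites (not the verbatim originals):
def run_cmd(cmd):
    print("[run]", cmd)
    assert os.system(cmd) == 0
def docker_task(name, build_cmd):
    run_cmd(build_cmd)
    run_cmd(f"sudo docker push {name}:latest")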
docker_task(
"jittor/jittor-cuda-11-1",
"sudo docker build --tag jittor/jittor-cuda-11-1:latest -f script/Dockerfile_cuda11 . --network host"
)
docker_task(
"jittor/jittor",
"sudo docker build --tag jittor/jittor:latest . --network host"
)
docker_task(
"jittor/jittor-cuda",
"sudo docker build --tag jittor/jittor-cuda:latest --build-arg FROM_IMAGE='nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04' . --network host"
)
docker_task(
"jittor/jittor-cuda-10-1",
"sudo docker build --tag jittor/jittor-cuda-10-1:latest --build-arg FROM_IMAGE='nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04' . --network host"
)
run_cmd("ssh jittor-web Documents/jittor-blog.git/hooks/post-update") | 34.288462 | 144 | 0.647224 |
7c1a65d75547f91601127884078028e187b93021 | 588 | py | Python | prodapt_solutions/config/cliargs.py | DineshDevaraj/interview_answers | 8d3d631dc96dc97ebef80604d6455c2c57c8823d | [
"MIT"
] | null | null | null | prodapt_solutions/config/cliargs.py | DineshDevaraj/interview_answers | 8d3d631dc96dc97ebef80604d6455c2c57c8823d | [
"MIT"
] | null | null | null | prodapt_solutions/config/cliargs.py | DineshDevaraj/interview_answers | 8d3d631dc96dc97ebef80604d6455c2c57c8823d | [
"MIT"
] | null | null | null |
import argparse
from helper.metaclasses_definition import Singleton
| 24.5 | 60 | 0.685374 |
7c1c295aedd09d62a7ca4222595cff9f7fd4e5fc | 1,237 | py | Python | plugins/flytekit-papermill/setup.py | TeoZosa/flytekit | c4f33c6deaf36a3feaf397cfc6de3bd62e986733 | [
"Apache-2.0"
] | null | null | null | plugins/flytekit-papermill/setup.py | TeoZosa/flytekit | c4f33c6deaf36a3feaf397cfc6de3bd62e986733 | [
"Apache-2.0"
] | null | null | null | plugins/flytekit-papermill/setup.py | TeoZosa/flytekit | c4f33c6deaf36a3feaf397cfc6de3bd62e986733 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
PLUGIN_NAME = "papermill"
microlib_name = f"flytekitplugins-{PLUGIN_NAME}"
plugin_requires = [
"flytekit>=0.16.0b0,<1.0.0",
"flytekitplugins-spark>=0.16.0b0,<1.0.0,!=0.24.0b0",
"papermill>=1.2.0",
"nbconvert>=6.0.7",
"ipykernel>=5.0.0",
]
__version__ = "0.0.0+develop"
setup(
name=microlib_name,
version=__version__,
author="flyteorg",
author_email="[email protected]",
description="This is the flytekit papermill plugin",
namespace_packages=["flytekitplugins"],
packages=[f"flytekitplugins.{PLUGIN_NAME}"],
install_requires=plugin_requires,
license="apache2",
python_requires=">=3.7",
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 30.170732 | 71 | 0.645918 |
7c1d6fd7dc1976bcfc2727fbe10b4b7b22073b1a | 705 | py | Python | 2017/third.py | vla3089/adventofcode | 0aefb5509e9f816f89eeab703393be7222632e02 | [
"Apache-2.0"
] | null | null | null | 2017/third.py | vla3089/adventofcode | 0aefb5509e9f816f89eeab703393be7222632e02 | [
"Apache-2.0"
] | null | null | null | 2017/third.py | vla3089/adventofcode | 0aefb5509e9f816f89eeab703393be7222632e02 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
input = 368078
size = 1
s_size = size * size # squared size
while (s_size < input):
size += 2
s_size = size * size
bottom_right = s_size  # the ring ends at its bottom-right corner (an odd square)
bottom_left = s_size - size + 1  # one edge length (size - 1) back from bottom-right
top_left = s_size - 2 * size + 2  # two edge lengths back
top_right = s_size - 3 * size + 3  # three edge lengths back
input_x = -1
input_y = -1
# bottom horizontal line
if (input > bottom_left):
input_x = size - 1
input_y = input - bottom_left
elif (input > top_left):
input_y = input - top_left
input_x = 0
elif (input > top_right):
input_x = 0
input_y = size - input + top_right - 1
else:
input_x = top_right - input
input_y = size - 1
ap_x = size / 2
ap_y = ap_x
print abs(ap_x - input_x) + abs(ap_y - input_y)
| 19.054054 | 47 | 0.631206 |
7c1dfdf1304b0b11fe75fef3682da8277a3d5207 | 2,981 | py | Python | racer/methods/genetic_programming/parameterized.py | max-eth/racer | 952991aedec5d8229bb1126c9c066613f5c30146 | [
"MIT"
] | 1 | 2022-02-26T00:10:03.000Z | 2022-02-26T00:10:03.000Z | racer/methods/genetic_programming/parameterized.py | max-eth/racer | 952991aedec5d8229bb1126c9c066613f5c30146 | [
"MIT"
] | null | null | null | racer/methods/genetic_programming/parameterized.py | max-eth/racer | 952991aedec5d8229bb1126c9c066613f5c30146 | [
"MIT"
] | null | null | null | import copy
import numpy as np
from racer.utils import load_pickle
from racer.methods.genetic_programming.program_tree import ProgramTree
def __len__(self):
return sum(len(tree) for tree in self.parameterized_trees)
def set_flat_parameters(self, params):
n_used = 0
for tree in self.parameterized_trees:
for node in tree.in_order():
node.set_params(list(params[n_used : n_used + 2]))
n_used += 2
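# Editor's sketch of the inverse operation (not in the extracted original;
# assumes each node exposes get_params() mirroring set_params() used above):
def get_flat_parameters(self):
    return np.array([p for tree in self.parameterized_trees
                     for node in tree.in_order()
                     for p in node.get_params()])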
| 32.402174 | 146 | 0.637035 |
7c1e9749d62da31f126224b5dcf3c15abd4025bd | 10,568 | py | Python | base/frontends/views.py | danielecook/upvote.pub | fdda3c0895427ddc76f4680d0d63f2d4bac59da0 | [
"MIT"
] | 1 | 2020-09-13T09:16:44.000Z | 2020-09-13T09:16:44.000Z | base/frontends/views.py | danielecook/upvote.pub | fdda3c0895427ddc76f4680d0d63f2d4bac59da0 | [
"MIT"
] | null | null | null | base/frontends/views.py | danielecook/upvote.pub | fdda3c0895427ddc76f4680d0d63f2d4bac59da0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
import os
import markdown2
from flask import (Blueprint,
request,
render_template,
flash, g,
session,
redirect,
url_for,
abort,
Markup)
from werkzeug import check_password_hash, generate_password_hash
from logzero import logger
from base import db, app
from base import search as search_module # don't override function name
from base.users.forms import RegisterForm, LoginForm
from base.users.models import User
from base.threads.models import Thread, Publication
from base.subreddits.models import Subreddit
from base.users.decorators import requires_login
from base.utils.user_utils import get_school
from base.subreddits.forms import subreddit_subs, sub_form
from base.utils.email import send_email
from base.utils.misc import random_string, validate_sort_type
mod = Blueprint('frontends', __name__, url_prefix='')
def get_subreddits():
"""
Fetch user subreddits otherwise fetch a list of defaults
"""
if g.get('user'):
subreddit_subs = g.user.subreddit_subs.get('subs')
subreddits = Subreddit.query.filter(Subreddit.name.in_(subreddit_subs))
else:
# Default set of subreddits
subreddits = Subreddit.query.all()
return subreddits
def process_thread_paginator(trending=False, rs=None, subreddit=None, sort_type='hot'):
"""
Abstracted because many views pull from a thread listing
source (subreddit permalink, homepage, etc.).
"""
threads_per_page = 15
cur_page = request.args.get('page') or 1
cur_page = int(cur_page)
thread_paginator = None
# if we are passing in a resultset, that means we are just looking to
# quickly paginate some arbitrary data, no sorting
if rs:
thread_paginator = rs.paginate(cur_page,
per_page=threads_per_page,
error_out=True)
return thread_paginator
# sexy line of code :)
base_query = subreddit.threads if subreddit else Thread.query
# Filter by user subs
logger.info(g.user)
if g.user:
subreddit_subs = g.user.subreddit_subs.get('subs')
base_query = base_query.join(Subreddit).filter(Subreddit.name.in_(subreddit_subs))
# Sorting
if sort_type == 'hot':
base_query = base_query.order_by(db.desc(Thread.hotness))
elif sort_type == 'top':
base_query = base_query.order_by(db.desc(Thread.votes))
elif sort_type == 'comments':
base_query = base_query.order_by(db.desc(Thread.n_comments))
elif sort_type == 'new':
base_query = base_query.order_by(db.desc(Thread.created_on))
elif sort_type == 'publication_date':
base_query = base_query.join(Publication).order_by(db.desc(Publication.pub_date))
thread_paginator = base_query.paginate(cur_page, per_page=threads_per_page, error_out=True)
return thread_paginator
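# Editor's illustration of typical call sites (not in the original module):
#     paginator = process_thread_paginator(sort_type='hot')               # homepage
#     paginator = process_thread_paginator(subreddit=sub, sort_type='top')
#     threads = paginator.items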
| 35.582492 | 157 | 0.599924 |
7c1ed9a736672c0c84e29905bebe37cc7b644280 | 2,949 | py | Python | Jarvis.py | vijayeshmt/Securitylock | 5877663a170a22ab8b5931dcef07c74f149cf9b8 | [
"CC0-1.0"
] | 1 | 2021-05-27T09:05:00.000Z | 2021-05-27T09:05:00.000Z | Jarvis.py | vijayeshmt/Securitylock | 5877663a170a22ab8b5931dcef07c74f149cf9b8 | [
"CC0-1.0"
] | null | null | null | Jarvis.py | vijayeshmt/Securitylock | 5877663a170a22ab8b5931dcef07c74f149cf9b8 | [
"CC0-1.0"
] | null | null | null | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
# To change the voice to female change 0 to 1.
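# Editor's sketch: speak(), wish_me() and sendEmail() are used below but their
# definitions were lost in extraction; minimal stand-ins consistent with the
# call sites (credentials are placeholders, not the originals).
def speak(audio):
    engine.say(audio)
    engine.runAndWait()
def wish_me():
    hour = datetime.datetime.now().hour
    speak("Good morning!" if hour < 12 else "Good evening!")
    speak("I am Jarvis. How may I help you?")
def sendEmail(to, content):
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login('[email protected]', 'your-app-password')  # placeholders
    server.sendmail('[email protected]', to, content)
    server.close()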
def take_command():
"""
It takes microphone input from the user and returns a string
:return:
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1.5 # It will wait 1.5 seconds to complete a sentence
audio = r.listen(source)
#Do read details
try:
print("Recognizing")
query = r.recognize_google(audio,language='en-in')
print(f'user said : {query}\n')
except Exception as e:
#print(e)
print("Say that again please")
return "None"
return query
if __name__ == '__main__':
wish_me()
while True:
query = take_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace('wikipedia','')
results = wikipedia.summary(query, sentences=2)  # increase or decrease sentences= to read more or less
speak("According to wikipedia")
#print(results)
speak(results)
elif 'open youtube' in query:
# webbrowser.Chrome.open_new("youtube.com")
webbrowser.open("youtube.com")
elif "open google" in query:
webbrowser.open("google.com")
elif "play music" in query:
music_dir = "D:\\vijayesh\\music"
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir,songs[1]))
elif "the time" in query:
strtime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"The time is {strtime}")
elif " open pycharm" in query:
pycharmpath ="C:\\Program Files\\JetBrains\\PyCharm Community Edition 2021"
os.startfile(pycharmpath)
#elif "open command" in query:
# filelocation = "path of the particular file like above"
# os.startfile(filelocation)
elif " email to vijayesh" or "email to vijesh" in query:
try:
speak("What should i say")#error present
content = take_command()
to = "[email protected]"
sendEmail(to,content)
speak("Email has been sent")
exit()
except Exception as e:
print(e)
speak("Sorry,I am not able to send this email")
exit()
| 26.097345 | 121 | 0.664632 |
7c1ee1ca0bd0d4b48cc0fd831915fd050efb4c03 | 7,323 | py | Python | clients/kratos/python/test/test_v0alpha1_api.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | clients/kratos/python/test/test_v0alpha1_api.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | clients/kratos/python/test/test_v0alpha1_api.py | kolotaev/sdk | 0dda1becd70be8d7b9d678321ebe780c1ba00485 | [
"Apache-2.0"
] | null | null | null | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_kratos_client
from ory_kratos_client.api.v0alpha1_api import V0alpha1Api # noqa: E501
if __name__ == '__main__':
unittest.main()
| 28.944664 | 446 | 0.677455 |
7c1fee26199f1ac314e850e76b7a8f652294de76 | 171 | py | Python | osrsapi/__init__.py | XaKingas/osrsapi | 14b93e0f6902724e57ebb1f50d817bd557e41c3d | [
"MIT"
] | null | null | null | osrsapi/__init__.py | XaKingas/osrsapi | 14b93e0f6902724e57ebb1f50d817bd557e41c3d | [
"MIT"
] | null | null | null | osrsapi/__init__.py | XaKingas/osrsapi | 14b93e0f6902724e57ebb1f50d817bd557e41c3d | [
"MIT"
] | 1 | 2020-07-03T11:24:55.000Z | 2020-07-03T11:24:55.000Z | from .grandexchange import GrandExchange, GameItemNotFound, GameItemParseError
from .item import Item
from .priceinfo import PriceInfo
from .pricetrend import PriceTrend
| 28.5 | 78 | 0.853801 |
7c1ff3b3368700c34adbc70fc88801c1bc52b535 | 2,838 | py | Python | utils/data_loader.py | dilum1995/DAugmentor | 6cc86dccf826415a88b8226265e16ae96b5cc05b | [
"MIT"
] | 1 | 2020-08-02T13:06:03.000Z | 2020-08-02T13:06:03.000Z | utils/data_loader.py | dilum1995/DAugmentor | 6cc86dccf826415a88b8226265e16ae96b5cc05b | [
"MIT"
] | null | null | null | utils/data_loader.py | dilum1995/DAugmentor | 6cc86dccf826415a88b8226265e16ae96b5cc05b | [
"MIT"
] | null | null | null | import pandas as pd
import os
import numpy as np
import cv2
from utils import constants as const
import matplotlib.pyplot as plt | 31.88764 | 75 | 0.565891 |
7c2027c5e127752f77dcae4527133dc870a9894e | 288 | py | Python | CompilerPython/LexerPython/main.py | valternunez/Compiler | 879cecbbeb1c21d9d19021664ace62442273d3ba | [
"MIT"
] | null | null | null | CompilerPython/LexerPython/main.py | valternunez/Compiler | 879cecbbeb1c21d9d19021664ace62442273d3ba | [
"MIT"
] | null | null | null | CompilerPython/LexerPython/main.py | valternunez/Compiler | 879cecbbeb1c21d9d19021664ace62442273d3ba | [
"MIT"
] | null | null | null | from lexer import *
import sys
if len(sys.argv) != 2:
print("usage: main.py file")
else:
lex = Lexer(sys.argv[1])
with open(sys.argv[1]) as f:
while True:
c = f.read(1)
if not c:
break
print(lex.scan().toString())
| 19.2 | 40 | 0.496528 |
7c203ac0f48d46b7efacaa17d6e53845b02eb976 | 7,512 | py | Python | cms/tests/test_views.py | Ibrahem3amer/bala7 | 70638c121ea85ff0e6a650c5f2641b0b3b04d6d0 | [
"Apache-2.0"
] | null | null | null | cms/tests/test_views.py | Ibrahem3amer/bala7 | 70638c121ea85ff0e6a650c5f2641b0b3b04d6d0 | [
"Apache-2.0"
] | null | null | null | cms/tests/test_views.py | Ibrahem3amer/bala7 | 70638c121ea85ff0e6a650c5f2641b0b3b04d6d0 | [
"Apache-2.0"
] | null | null | null | from django.core.urlresolvers import resolve
from django.urls import reverse
from django.test import TestCase, RequestFactory
from django.http import HttpRequest, Http404
from django.contrib.auth.models import User
from unittest import skip
from users.models import University, Faculty, Department, UserProfile
from cms.models import Topic
from cms.views import get_topic
| 36.823529 | 117 | 0.651092 |
7c20c3110a71ede08c1358d9822f7b43bb07338f | 4,903 | py | Python | 3D/Train_Module_3D.py | geometatqueens/RCNN | 2e1e67264969f05a2f554595577dfb1025938245 | [
"Unlicense"
] | 1 | 2020-04-30T21:31:59.000Z | 2020-04-30T21:31:59.000Z | 3D/Train_Module_3D.py | geometatqueens/RCNN | 2e1e67264969f05a2f554595577dfb1025938245 | [
"Unlicense"
] | null | null | null | 3D/Train_Module_3D.py | geometatqueens/RCNN | 2e1e67264969f05a2f554595577dfb1025938245 | [
"Unlicense"
] | null | null | null | """The present code is the Version 1.0 of the RCNN approach to perform MPS
in 3D for categorical variables. It has been developed by S. Avalos and J. Ortiz in the
Geometallurgical Group at Queen's University as part of a PhD program.
The code is not free of bugs but running end-to-end.
Any comments and further improvements are welcome at: [email protected]
April 16, 2019.
Geomet Group - Queen's University - Canada"""
# Do not display the AVX message about using GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
## #########################
import numpy as np
import tensorflow as tf
import time
import External_Functions_3D as fns_nested
import gc
for ind0 in range(1):
start_time_AllTrain = time.time()
HyperPar = []
HyperPar.append(50) # SGsizex - Num 0
HyperPar.append(50) # SGsizey - Num 1
HyperPar.append(50) # SGsizez - Num 2
HyperPar.append(int(7)) # Search_x - Num 3
HyperPar.append(int(7)) # Search_y - Num 4
HyperPar.append(int(7)) # Search_z - Num 5
HyperPar.append(int(7)) # IPsizex - Num 6
HyperPar.append(int(7)) # IPsizey - Num 7
HyperPar.append(int(7)) # IPsizez - Num 8
HyperPar.append(50) # Percentage of Data Conditioning - Num 9 .. divided by 3 so 1% is 10 represents 1%
HyperPar.append(1) # MinDC - Num 10
HyperPar.append(1500) # Num Fully Connected - Num 11
HyperPar.append(3) # wdnh - Num 12
HyperPar.append(16) # convdepth - Num 13
HyperPar.append(2) # num of categories - Num 14
print("SG: ", int(HyperPar[3]),"x",int(HyperPar[4]),"x",int(HyperPar[5]), "IP: ", int(HyperPar[6]),"x",int(HyperPar[7]),"x",int(HyperPar[8]))
Ncicles = 500
Nepoch = 1
#Nbatch = 250
Nsamples = 512
TrainingImage = "TI_Collaboration_1of4_50x50x50_newRepresentation.dat"
LocModel = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocModel = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
LocFile = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocFile = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
print("[Graph]")
#fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D_NoBN(HyperPar=HyperPar, LocModel=LocModel)
fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D(HyperPar=HyperPar, LocModel=LocModel)
# To save the TI
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3,Training=False, Padding=True)
TempSimGrid.SavePlot(name=LocModel+'_TI.png', Level=1)
MaxLR, MinLR = 0.01, 0.001
StepLR = 10
PointStart = 1
for indTrain in range(Ncicles):
#HyperPar[9] = np.random.randint(41)+10
cuos = indTrain%(2*StepLR)
if cuos < StepLR:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*cuos + MinLR, decimals=7)
else:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*(StepLR - cuos) + MaxLR, decimals=7)
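# The if/else above implements a triangular (cyclical) learning-rate schedule:
# with MaxLR=0.01, MinLR=0.001, StepLR=10 the rate ramps 0.001 -> 0.01 over
# ten cycles, then back down, repeating every 2*StepLR training cycles.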
start_time_1 = time.time()
print ("Cicle: {}".format(indTrain+PointStart), "Learning Rate: ", LearningRate)
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=True, Padding=True)
print("[Sim]")
TempSimGrid.Simulate_4ConvNets_BN_3D(LocModel=LocModel, Cicle=(indTrain+PointStart), Plot=True)
print("[Saving Grid]")
TempSimGrid.SaveGrid(file="{}/TrainReas_{}.txt".format(LocFile, indTrain+PointStart))
print("[Train]")
TempSimGrid.Train_4ConvNets_BN_3D(Epochs=Nepoch, Num_samples=Nsamples, LocModel=LocModel, LR=LearningRate)
print("--%s seconds of whole training process-" % (np.around((time.time() - start_time_1), decimals=2)))
gc.collect()
print(" ")
print("--%s minutes of ALL training-" % ((time.time() - start_time_AllTrain)/60)) | 53.879121 | 343 | 0.713237 |
7c21319778186a2abea07c3db5dcc502d14e209f | 1,306 | py | Python | feature_flags_project/feature_flags/providers.py | steuke/django_feature_flags_example | 00e33378999d6d567c37593c17289405fc7b5847 | [
"MIT"
] | null | null | null | feature_flags_project/feature_flags/providers.py | steuke/django_feature_flags_example | 00e33378999d6d567c37593c17289405fc7b5847 | [
"MIT"
] | 3 | 2021-09-22T18:56:38.000Z | 2021-11-29T16:11:59.000Z | feature_flags_project/feature_flags/providers.py | steuke/django_feature_flags_example | 00e33378999d6d567c37593c17289405fc7b5847 | [
"MIT"
] | null | null | null | import logging
from typing import Dict
from django.http import HttpRequest
logger = logging.getLogger(__name__)
| 32.65 | 100 | 0.712098 |
7c2377aec1cdd1edd01522b34885f68b9680468a | 82 | py | Python | src/app/database/__init__.py | roch1990/aiohttp-blog | 32e7b76b5b293d4517631ea82dfa2b268a1662eb | [
"MIT"
] | 20 | 2020-02-29T19:03:31.000Z | 2022-02-18T21:13:12.000Z | src/app/database/__init__.py | roch1990/aiohttp-blog | 32e7b76b5b293d4517631ea82dfa2b268a1662eb | [
"MIT"
] | 465 | 2020-02-29T19:08:18.000Z | 2022-03-18T22:21:49.000Z | src/app/database/__init__.py | roch1990/aiohttp-blog | 32e7b76b5b293d4517631ea82dfa2b268a1662eb | [
"MIT"
] | 26 | 2020-11-26T09:00:03.000Z | 2022-02-16T04:20:53.000Z | from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() | 27.333333 | 55 | 0.853659 |
7c23d8601d0a15002cc4ed3c1cea741aa47089e1 | 34,227 | py | Python | src/plottoolbox/functions/kde.py | timcera/plottoolbox | b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298 | [
"BSD-3-Clause"
] | null | null | null | src/plottoolbox/functions/kde.py | timcera/plottoolbox | b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298 | [
"BSD-3-Clause"
] | 6 | 2021-09-06T21:26:12.000Z | 2022-03-30T11:55:56.000Z | src/plottoolbox/functions/kde.py | timcera/plottoolbox | b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
import itertools
import os
import warnings
import mando
import numpy as np
import pandas as pd
from mando.rst_text_formatter import RSTHelpFormatter
from tstoolbox import tsutils
from .. import plotutils
warnings.filterwarnings("ignore")
# @tsutils.validator(
# ofilename=[str, ["pass", []], 1],
# type=[str, ["domain", ["kde",],], 1,],
# lag_plot_lag=[int, ["range", [1, None]], 1],
# xtitle=[str, ["pass", []], 1],
# ytitle=[str, ["pass", []], 1],
# title=[str, ["pass", []], 1],
# figsize=[float, ["range", [0, None]], 2],
# legend=[bool, ["domain", [True, False]], 1],
# legend_names=[str, ["pass", []], 1],
# subplots=[bool, ["domain", [True, False]], 1],
# sharex=[bool, ["domain", [True, False]], 1],
# sharey=[bool, ["domain", [True, False]], 1],
# colors=[str, ["pass", []], None],
# linestyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST], None],
# markerstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.MARKER_LIST], None],
# bar_hatchstyles=[str, ["domain", ["auto", None, "", " ", " "] + plotutils.HATCH_LIST], None],
# style=[str, ["pass", []], None],
# xlim=[float, ["pass", []], 2],
# ylim=[float, ["pass", []], 2],
# xaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# yaxis=[str, ["domain", ["arithmetic", "log"]], 1],
# secondary_y=[bool, ["domain", [True, False]], 1],
# mark_right=[bool, ["domain", [True, False]], 1],
# scatter_matrix_diagonal=[str, ["domain", ["kde", "hist"]], 1],
# bootstrap_size=[int, ["range", [0, None]], 1],
# xy_match_line=[str, ["pass", []], 1],
# grid=[bool, ["domain", [True, False]], 1],
# label_rotation=[float, ["pass", []], 1],
# label_skip=[int, ["range", [1, None]], 1],
# drawstyle=[str, ["pass", []], 1],
# por=[bool, ["domain", [True, False]], 1],
# invert_xaxis=[bool, ["domain", [True, False]], 1],
# invert_yaxis=[bool, ["domain", [True, False]], 1],
# plotting_position=[
# str,
# [
# "domain",
# ["weibull", "benard", "tukey", "gumbel", "hazen", "cunnane", "california"],
# ],
# 1,
# ],
# prob_plot_sort_values=[str, ["domain", ["ascending", "descending"]], 1],
# plot_styles=[
# str,
# [
# "domain",
# [
# "classic",
# "Solarize_Light2",
# "bmh",
# "dark_background",
# "fast",
# "fivethirtyeight",
# "ggplot",
# "grayscale",
# "seaborn",
# "seaborn-bright",
# "seaborn-colorblind",
# "seaborn-dark",
# "seaborn-dark-palette",
# "seaborn-darkgrid",
# "seaborn-deep",
# "seaborn-muted",
# "seaborn-notebook",
# "seaborn-paper",
# "seaborn-pastel",
# "seaborn-poster",
# "seaborn-talk",
# "seaborn-ticks",
# "seaborn-white",
# "seaborn-whitegrid",
# "tableau-colorblind10",
# "science",
# "grid",
# "ieee",
# "scatter",
# "notebook",
# "high-vis",
# "bright",
# "vibrant",
# "muted",
# "retro",
# ],
# ],
# None,
# ],
# hlines_y=[float, ["pass", []], None],
# hlines_xmin=[float, ["pass", []], None],
# hlines_xmax=[float, ["pass", []], None],
# hlines_colors=[str, ["pass", []], None],
# hlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# vlines_x=[float, ["pass", []], None],
# vlines_ymin=[float, ["pass", []], None],
# vlines_ymax=[float, ["pass", []], None],
# vlines_colors=[str, ["pass", []], None],
# vlines_linestyles=[
# str,
# ["domain", ["auto", None, "", " ", " "] + plotutils.LINE_LIST],
# None,
# ],
# )
def kde(
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
names=None,
ofilename="plot.png",
xtitle="",
ytitle="",
title="",
figsize="10,6.0",
legend=None,
legend_names=None,
subplots=False,
sharex=True,
sharey=False,
colors="auto",
linestyles="auto",
markerstyles=" ",
bar_hatchstyles="auto",
style="auto",
logx=False,
logy=False,
xaxis="arithmetic",
yaxis="arithmetic",
xlim=None,
ylim=None,
secondary_y=False,
mark_right=True,
scatter_matrix_diagonal="kde",
bootstrap_size=50,
bootstrap_samples=500,
norm_xaxis=False,
norm_yaxis=False,
lognorm_xaxis=False,
lognorm_yaxis=False,
xy_match_line="",
grid=False,
label_rotation=None,
label_skip=1,
force_freq=None,
drawstyle="default",
por=False,
invert_xaxis=False,
invert_yaxis=False,
round_index=None,
plotting_position="weibull",
prob_plot_sort_values="descending",
source_units=None,
target_units=None,
lag_plot_lag=1,
plot_styles="bright",
hlines_y=None,
hlines_xmin=None,
hlines_xmax=None,
hlines_colors=None,
hlines_linestyles="-",
vlines_x=None,
vlines_ymin=None,
vlines_ymax=None,
vlines_colors=None,
vlines_linestyles="-",
**kwds,
):
r"""Plot data."""
# Need to work around some old option defaults with the implementation of
# mando
legend = bool(legend == "" or legend == "True" or legend is None)
type = "kde"
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import FixedLocator
tsd = tsutils.common_kwds(
input_ts,
skiprows=skiprows,
names=names,
index_type=index_type,
start_date=start_date,
end_date=end_date,
pick=columns,
round_index=round_index,
dropna="all",
source_units=source_units,
target_units=target_units,
clean=clean,
por=por,
)
tsd, lnames = plotutils.check(type, tsd, legend_names)
# This is to help pretty print the frequency
try:
try:
pltfreq = str(tsd.index.freq, "utf-8").lower()
except TypeError:
pltfreq = str(tsd.index.freq).lower()
if pltfreq.split(" ")[0][1:] == "1":
beginstr = 3
else:
beginstr = 1
if pltfreq == "none":
short_freq = ""
else:
# short freq string (day) OR (2 day)
short_freq = "({})".format(pltfreq[beginstr:-1])
except AttributeError:
short_freq = ""
if colors == "auto":
colors = None
else:
colors = tsutils.make_list(colors)
if linestyles == "auto":
linestyles = plotutils.LINE_LIST
else:
linestyles = tsutils.make_list(linestyles)
if bar_hatchstyles == "auto":
bar_hatchstyles = plotutils.HATCH_LIST
else:
bar_hatchstyles = tsutils.make_list(bar_hatchstyles)
if markerstyles == "auto":
markerstyles = plotutils.MARKER_LIST
else:
markerstyles = tsutils.make_list(markerstyles)
if markerstyles is None:
markerstyles = " "
if style != "auto":
nstyle = tsutils.make_list(style)
if len(nstyle) != len(tsd.columns):
raise ValueError(
tsutils.error_wrapper(
"""
You have to have the same number of style strings as time-series to plot.
You supplied '{}' for style which has {} style strings,
but you have {} time-series.
""".format(
style, len(nstyle), len(tsd.columns)
)
)
)
colors = []
markerstyles = []
linestyles = []
for st in nstyle:
colors.append(st[0])
if len(st) == 1:
markerstyles.append(" ")
linestyles.append("-")
continue
if st[1] in plotutils.MARKER_LIST:
markerstyles.append(st[1])
try:
linestyles.append(st[2:])
except IndexError:
linestyles.append(" ")
else:
markerstyles.append(" ")
linestyles.append(st[1:])
if linestyles is None:
linestyles = [" "]
else:
linestyles = [" " if i in [" ", None] else i for i in linestyles]
markerstyles = [" " if i is None else i for i in markerstyles]
if colors is not None:
icolors = itertools.cycle(colors)
else:
icolors = None
imarkerstyles = itertools.cycle(markerstyles)
ilinestyles = itertools.cycle(linestyles)
# Only for bar, barh, bar_stacked, and barh_stacked.
ibar_hatchstyles = itertools.cycle(bar_hatchstyles)
if (
logx is True
or logy is True
or norm_xaxis is True
or norm_yaxis is True
or lognorm_xaxis is True
or lognorm_yaxis is True
):
warnings.warn(
"""
*
* The --logx, --logy, --norm_xaxis, --norm_yaxis, --lognorm_xaxis, and
* --lognorm_yaxis options are deprecated.
*
* For --logx use --xaxis="log"
* For --logy use --yaxis="log"
* For --norm_xaxis use --type="norm_xaxis"
* For --norm_yaxis use --type="norm_yaxis"
* For --lognorm_xaxis use --type="lognorm_xaxis"
* For --lognorm_yaxis use --type="lognorm_yaxis"
*
"""
)
if xaxis == "log":
logx = True
if yaxis == "log":
logy = True
xlim = plotutils.know_your_limits(xlim, axis=xaxis)
ylim = plotutils.know_your_limits(ylim, axis=yaxis)
plot_styles = tsutils.make_list(plot_styles) + ["no-latex"]
style_loc = os.path.join(
os.path.dirname(__file__), os.pardir, "SciencePlots_styles"
)
plot_styles = [
os.path.join(style_loc, i + ".mplstyle")
if os.path.exists(os.path.join(style_loc, i + ".mplstyle"))
else i
for i in plot_styles
]
plt.style.use(plot_styles)
figsize = tsutils.make_list(figsize, n=2)
_, ax = plt.subplots(figsize=figsize)
if type in ["kde", "probability_density"]:
ax = tsd.plot.kde(
legend=legend,
subplots=subplots,
sharex=sharex,
sharey=sharey,
style=None,
logx=logx,
logy=logy,
xlim=xlim,
ylim=ylim,
secondary_y=secondary_y,
figsize=figsize,
)
for index, line in enumerate(ax.lines):
if icolors is not None:
c = next(icolors)
else:
c = None
if imarkerstyles is not None:
m = next(imarkerstyles)
else:
m = None
if ilinestyles is not None:
l = next(ilinestyles)
else:
l = None
if c is not None:
plt.setp(line, color=c)
plt.setp(line, marker=m)
plt.setp(line, linestyle=l)
ytitle = ytitle or "Density"
if legend is True:
plt.legend(loc="best")
if hlines_y is not None:
hlines_y = tsutils.make_list(hlines_y)
hlines_xmin = tsutils.make_list(hlines_xmin)
hlines_xmax = tsutils.make_list(hlines_xmax)
hlines_colors = tsutils.make_list(hlines_colors)
hlines_linestyles = tsutils.make_list(hlines_linestyles)
nxlim = ax.get_xlim()
if hlines_xmin is None:
hlines_xmin = nxlim[0]
if hlines_xmax is None:
hlines_xmax = nxlim[1]
if vlines_x is not None:
vlines_x = tsutils.make_list(vlines_x)
vlines_ymin = tsutils.make_list(vlines_ymin)
vlines_ymax = tsutils.make_list(vlines_ymax)
vlines_colors = tsutils.make_list(vlines_colors)
vlines_linestyles = tsutils.make_list(vlines_linestyles)
nylim = ax.get_ylim()
if vlines_ymin is None:
vlines_ymin = nylim[0]
if vlines_ymax is None:
vlines_ymax = nylim[1]
if type in [
"time",
"xy",
"bar",
"bar_stacked",
"histogram",
"norm_xaxis",
"lognorm_xaxis",
"weibull_xaxis",
"norm_yaxis",
"lognorm_yaxis",
"weibull_yaxis",
]:
if hlines_y is not None:
if type in ["norm_yaxis", "lognorm_yaxis", "weibull_yaxis"]:
hlines_y = ppf(tsutils.make_list(hlines_y))
plt.hlines(
hlines_y,
hlines_xmin,
hlines_xmax,
colors=hlines_colors,
linestyles=hlines_linestyles,
)
if vlines_x is not None:
if type in ["norm_xaxis", "lognorm_xaxis", "weibull_xaxis"]:
vlines_x = ppf(tsutils.make_list(vlines_x))
plt.vlines(
vlines_x,
vlines_ymin,
vlines_ymax,
colors=vlines_colors,
linestyles=vlines_linestyles,
)
plt.xlabel(xtitle)
plt.ylabel(ytitle)
if invert_xaxis is True:
plt.gca().invert_xaxis()
if invert_yaxis is True:
plt.gca().invert_yaxis()
plt.grid(grid)
plt.title(title)
plt.tight_layout()
if ofilename is not None:
plt.savefig(ofilename)
return plt
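# Editor's illustration (file name and column indices are made up):
#     from plottoolbox.functions.kde import kde
#     kde(input_ts="flow.csv", columns=[1, 2], ofilename="kde.png")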
kde.__doc__ = kde_cli.__doc__
| 29.918706 | 100 | 0.530984 |
7c241e9ea6651f1832b530bacf0b946a3f610e8c | 2,255 | py | Python | src/models/GNN.py | 3verlyn/DL-abstract-argumentation | 885e442077f5f8e576092c6648077e00ceb79dff | [
"MIT"
] | 6 | 2020-05-01T10:04:16.000Z | 2021-12-12T06:35:00.000Z | src/models/GNN.py | 3verlyn/DL-abstract-argumentation | 885e442077f5f8e576092c6648077e00ceb79dff | [
"MIT"
] | 3 | 2020-05-01T09:58:16.000Z | 2021-12-05T09:24:42.000Z | src/models/GNN.py | 3verlyn/DL-abstract-argumentation | 885e442077f5f8e576092c6648077e00ceb79dff | [
"MIT"
] | 3 | 2021-12-01T12:09:40.000Z | 2022-03-08T07:35:10.000Z | from collections import OrderedDict
import torch
import torch.nn as nn
from torch_geometric.data.batch import Batch
| 32.214286 | 91 | 0.613747 |
7c247e4df77036ee1f8b8a7c4396fc03bed606ad | 977 | py | Python | configs/baselines/DACN/GNN/GCN_res_layer.py | vivek-r-2000/BoundaryNet | fce8d51a516646c1001116d03872dbba9e4c5082 | [
"MIT"
] | 17 | 2021-06-07T12:30:23.000Z | 2022-03-07T06:32:25.000Z | configs/baselines/DACN/GNN/GCN_res_layer.py | vivek-r-2000/BoundaryNet | fce8d51a516646c1001116d03872dbba9e4c5082 | [
"MIT"
] | 2 | 2021-07-13T13:24:14.000Z | 2022-03-08T07:21:09.000Z | configs/baselines/DACN/GNN/GCN_res_layer.py | vivek-r-2000/BoundaryNet | fce8d51a516646c1001116d03872dbba9e4c5082 | [
"MIT"
] | 4 | 2021-06-26T15:12:44.000Z | 2021-11-08T16:36:52.000Z | import math
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
from GNN.GCN_layer import GraphConvolution | 23.829268 | 65 | 0.63869 |
7c24dd7d64e797088cd127f5acf19696ee37ca0f | 28,569 | py | Python | mtools/util/logfile.py | lukasvosyka/mtools | b94620cef48a9eb71b6a7fa93ad88f70cd36982f | [
"Apache-2.0"
] | null | null | null | mtools/util/logfile.py | lukasvosyka/mtools | b94620cef48a9eb71b6a7fa93ad88f70cd36982f | [
"Apache-2.0"
] | null | null | null | mtools/util/logfile.py | lukasvosyka/mtools | b94620cef48a9eb71b6a7fa93ad88f70cd36982f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from __future__ import print_function
import os
import re
import sys
from datetime import datetime
from math import ceil
from mtools.util.input_source import InputSource
from mtools.util.logevent import LogEvent
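# NOTE: in the full source the list below is a class attribute of
# LogFile(InputSource) and is referenced as self.states further down; the
# class statement was lost in extraction.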
states = ['PRIMARY', 'SECONDARY', 'DOWN', 'STARTUP', 'STARTUP2',
          'RECOVERING', 'ROLLBACK', 'ARBITER', 'UNKNOWN']
def __len__(self):
"""Return the number of lines in a log file."""
return self.num_lines
def _iterate_lines(self):
"""Count number of lines (can be expensive)."""
self._num_lines = 0
self._restarts = []
self._rs_state = []
ln = 0
for ln, line in enumerate(self.filehandle):
if isinstance(line, bytes):
line = line.decode("utf-8", "replace")
if (self._has_level is None and
line[28:31].strip() in LogEvent.log_levels and
line[31:39].strip() in LogEvent.log_components):
self._has_level = True
# find version string (fast check to eliminate most lines)
if "version" in line[:100]:
logevent = LogEvent(line)
restart = self._check_for_restart(logevent)
if restart:
self._restarts.append((restart, logevent))
if "starting :" in line or "starting:" in line:
# look for hostname, port
match = re.search('port=(?P<port>\d+).*host=(?P<host>\S+)',
line)
if match:
self._hostname = match.group('host')
self._port = match.group('port')
""" For 3.0 the "[initandlisten] options:" long entry contained the
"engine" field if WiredTiger was the storage engine. There were
only two engines, MMAPv1 and WiredTiger
"""
if "[initandlisten] options:" in line:
match = re.search('replSet: "(?P<replSet>\S+)"', line)
if match:
self._repl_set = match.group('replSet')
match = re.search('engine: "(?P<engine>\S+)"', line)
if match:
self._storage_engine = match.group('engine')
else:
self._storage_engine = 'mmapv1'
""" For 3.2 the "[initandlisten] options:" no longer contains the
"engine" field So now we have to look for the "[initandlisten]
wiredtiger_open config:" which was present in 3.0, but would
now tell us definitively that wiredTiger is being used
"""
if "[initandlisten] wiredtiger_open config:" in line:
self._storage_engine = 'wiredTiger'
if "command admin.$cmd command: { replSetInitiate:" in line:
match = re.search('{ _id: "(?P<replSet>\S+)", '
'members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._repl_set = match.group('replSet')
self._repl_set_members = match.group('replSetMembers')
# Replica set config logging in MongoDB 3.0+
new_config = ("New replica set config in use: ")
if new_config in line:
match = re.search('{ _id: "(?P<replSet>\S+)", '
'version: (?P<replSetVersion>\d+), ', line)
if match:
self._repl_set = match.group('replSet')
self._repl_set_version = match.group('replSetVersion')
match = re.search(', protocolVersion: (?P<replSetProtocol>\d+), ', line)
if match:
self._repl_set_protocol = match.group('replSetProtocol')
match = re.search('members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._repl_set_members = match.group('replSetMembers')
# if ("is now in state" in line and
# next(state for state in states if line.endswith(state))):
if "is now in state" in line:
tokens = line.split()
# 2.6
if tokens[1].endswith(']'):
pos = 4
else:
pos = 5
host = tokens[pos]
rs_state = tokens[-1]
state = (host, rs_state, LogEvent(line))
self._rs_state.append(state)
continue
if "[rsMgr] replSet" in line:
tokens = line.split()
if self._hostname:
host = self._hostname + ':' + self._port
else:
host = os.path.basename(self.name)
host += ' (self)'
if tokens[-1] in self.states:
rs_state = tokens[-1]
else:
# 2.6
if tokens[1].endswith(']'):
pos = 2
else:
pos = 6
rs_state = ' '.join(tokens[pos:])
state = (host, rs_state, LogEvent(line))
self._rs_state.append(state)
continue
self._num_lines = ln + 1
# reset logfile
self.filehandle.seek(0)
def _calculate_bounds(self):
"""Calculate beginning and end of logfile."""
if self._bounds_calculated:
# Assume no need to recalc bounds for lifetime of a Logfile object
return
if self.from_stdin:
return False
# we should be able to find a valid log line within max_start_lines
max_start_lines = 10
lines_checked = 0
# get start datetime
for line in self.filehandle:
logevent = LogEvent(line)
lines_checked += 1
if logevent.datetime:
self._start = logevent.datetime
self._timezone = logevent.datetime.tzinfo
self._datetime_format = logevent.datetime_format
self._datetime_nextpos = logevent._datetime_nextpos
break
if lines_checked > max_start_lines:
break
# sanity check before attempting to find end date
        if self._start is None:
raise SystemExit("Error: <%s> does not appear to be a supported "
"MongoDB log file format" % self.filehandle.name)
# get end datetime (lines are at most 10k,
# go back 30k at most to make sure we catch one)
self.filehandle.seek(0, 2)
self._filesize = self.filehandle.tell()
self.filehandle.seek(-min(self._filesize, 30000), 2)
for line in reversed(self.filehandle.readlines()):
logevent = LogEvent(line)
if logevent.datetime:
self._end = logevent.datetime
break
# if there was a roll-over, subtract 1 year from start time
if self._end < self._start:
self._start = self._start.replace(year=self._start.year - 1)
self._year_rollover = self._end
else:
self._year_rollover = False
# reset logfile
self.filehandle.seek(0)
self._bounds_calculated = True
return True
def _find_curr_line(self, prev=False):
"""
Internal helper function.
Find the current (or previous if prev=True) line in a log file based on
the current seek position.
"""
curr_pos = self.filehandle.tell()
# jump back 15k characters (at most) and find last newline char
jump_back = min(self.filehandle.tell(), 15000)
self.filehandle.seek(-jump_back, 1)
buff = self.filehandle.read(jump_back)
self.filehandle.seek(curr_pos, 0)
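        # guard against an infinite parsing loop: if we are asked for the
        # previous line but the file position has not moved since the last
        # call, the parser is stuck at the same offset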
if prev and self.prev_pos is not None and self.prev_pos == curr_pos:
# Number of characters to show before/after the log offset
error_context = 300
self.filehandle.seek(-error_context, 1)
buff = self.filehandle.read(curr_pos)
hr = "-" * 60
print("Fatal log parsing loop detected trying to find previous "
"log line near offset %s in %s:\n\n%s\n%s\n"
"<--- (current log parsing offset) \n%s\n%s\n"
% (curr_pos, self.name, hr, buff[:error_context],
buff[error_context:error_context + 1], hr),
file=sys.stderr)
raise SystemExit("Cannot parse %s with requested options"
% self.filehandle.name)
else:
self.prev_pos = curr_pos
if isinstance(buff, bytes):
buff = buff.decode("utf-8", "replace")
newline_pos = buff.rfind('\n')
if prev:
newline_pos = buff[:newline_pos].rfind('\n')
# move back to last newline char
if newline_pos == -1:
self.filehandle.seek(0)
return self.next()
self.filehandle.seek(newline_pos - jump_back + 1, 1)
        # roll forward until we find a line with a datetime
try:
logevent = self.next()
while not logevent.datetime:
logevent = self.next()
return logevent
except StopIteration:
# reached end of file
return None
def _find_sharding_info(self):
"""
Iterate over file and find any sharding related information
"""
self._shards = []
self._chunks_moved_from = []
self._chunks_moved_to = []
self._chunk_splits = []
prev_line = ""
for line in self.filehandle:
if isinstance(line, bytes):
line = line.decode("utf-8", "replace")
if self.binary == "mongos":
if "Starting new replica set monitor for" in line:
if "[mongosMain]" in line:
match = re.search("for (?P<csrsName>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
csrs_info = (match.group('csrsName'),
match.group('replSetMembers'))
self._csrs = csrs_info
else:
match = re.search("for (?P<shardName>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
shard_info = (match.group('shardName'),
match.group('replSetMembers'))
self._shards.append(shard_info)
elif self.binary == "mongod":
logevent = LogEvent(line)
if "New replica set config in use" in line:
if "configsvr: true" in line:
                        match = re.search(r' _id: "(?P<replSet>\S+)".*'
                                          r'members: (?P<replSetMembers>[^]]+ ])', line)
if match:
self._csrs = (
match.group('replSet'),
match.group('replSetMembers')
)
if "Starting new replica set monitor for" in line:
match = re.search("for (?P<replSet>\w+)/"
"(?P<replSetMembers>\S+)", line)
if match:
if self._csrs and match.group('replSet') != self._csrs[0]:
self._shards.append((
match.group('replSet'),
match.group('replSetMembers')
))
elif not self._csrs:
self._csrs = (
match.group('replSet'),
match.group('replSetMembers')
)
if "moveChunk.from" in line:
logevent = LogEvent(line)
                    match = re.search(r'ns: "(?P<namespace>\S+)".*'
                                      r'details: { (?P<range>.*\}).*'
                                      r'to: "(?P<movedTo>\S+)".*note: "(?P<note>\S+)"', line)
if match:
time = logevent.datetime
chunk_range = match.group('range')
namespace = match.group('namespace')
moved_to = match.group('movedTo')
note = match.group('note')
if note == "success":
errmsg = None
                            steps = re.findall(r'(?P<steps>step \d of \d): (?P<stepTimes>\d+)', line)
else:
                            match = re.search(r':: caused by :: (?P<errmsg>\S+):', prev_line)
steps = None
if match:
errmsg = match.group('errmsg')
else:
errmsg = "Unknown"
chunk_migration = (time, chunk_range, moved_to, namespace, steps, note, errmsg)
self._chunks_moved_from.append(chunk_migration)
if "moveChunk.to" in line:
logevent = LogEvent(line)
                    match = re.search(r'ns: "(?P<namespace>\S+)".*'
                                      r'details: { (?P<range>.*\}).*note: "(?P<note>\S+)"', line)
if match:
time = logevent.datetime
chunk_range = match.group('range')
namespace = match.group('namespace')
# TODO: alter this to find moved from shard name when SERVER-45770 TICKET is added
moved_from = "Unknown"
note = match.group('note')
if note == "success":
errmsg = None
                            steps = re.findall(r'(?P<steps>step \d of \d): (?P<stepTimes>\d+)', line)
else:
steps = None
match = re.search('errmsg: "(?P<errmsg>.*)"', line)
if match:
errmsg = match.group('errmsg')
chunk_migration = (time, chunk_range, moved_from, namespace, steps, note, errmsg)
self._chunks_moved_to.append(chunk_migration)
if "Finding the split vector for" in line:
logevent = LogEvent(line)
                    match = re.search(r'for (?P<namespace>\S+).*'
                                      r'numSplits: (?P<numSplits>\d+)', line)
if match:
time = logevent.datetime
split_range = None
namespace = match.group("namespace")
numSplits = match.group('numSplits')
success = None
time_taken = 0
error = None
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "splitVector" in line:
logevent = LogEvent(line)
                    match = re.search(r'splitVector: "(?P<namespace>\S+)".*,'
                                      r' (?P<range>min:.*), max.*op_msg (?P<time_taken>\d+)', line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
time_taken = match.group("time_taken")
numSplits = 0
success = True
error = None
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "Unable to auto-split chunk" in line:
logevent = LogEvent(line)
match = re.search("chunk \[(?P<range>.*)\) "
'in namespace (?P<namespace>\S+)'
' :: caused by :: (?P<error>\S+): ', line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
numSplits = 0
success = False
time_taken = 0
error = match.group("error")
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
elif "jumbo" in line:
logevent = LogEvent(line)
                    match = re.search(r'migration (?P<namespace>\S+): \[(?P<range>.*)\)', prev_line)
if match:
time = logevent.datetime
split_range = match.group("range")
namespace = match.group("namespace")
numSplits = 0
success = False
time_taken = 0
error = "Jumbo"
self._chunk_splits.append((time, split_range, namespace, numSplits, success, time_taken, error))
prev_line = line
# reset logfile
self.filehandle.seek(0)
def fast_forward(self, start_dt):
"""
Fast-forward file to given start_dt datetime obj using binary search.
Only fast for files. Streams need to be forwarded manually, and it will
miss the first line that would otherwise match (as it consumes the log
line).
"""
if self.from_stdin:
            # streams cannot be seeked back; the caller must skip lines manually
return
else:
# fast bisection path
max_mark = self.filesize
step_size = max_mark
# check if start_dt is already smaller than first datetime
self.filehandle.seek(0)
le = self.next()
if le.datetime and le.datetime >= start_dt:
self.filehandle.seek(0)
return
le = None
self.filehandle.seek(0)
# search for lower bound
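            # classic bisection on the byte offset: halve the step each probe;
            # a probe landing at or after start_dt flips the sign so the next
            # seek moves backwards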
while abs(step_size) > 100:
step_size = ceil(step_size / 2.)
self.filehandle.seek(step_size, 1)
le = self._find_curr_line()
if not le:
break
if le.datetime >= start_dt:
step_size = -abs(step_size)
else:
step_size = abs(step_size)
if not le:
return
            # now walk backwards until we find a truly smaller line
while self.filehandle.tell() >= 2 and (le.datetime is None or
le.datetime >= start_dt):
self.filehandle.seek(-2, 1)
le = self._find_curr_line(prev=True)
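# Usage sketch (hypothetical; the enclosing class, its constructor and the
# num_lines property are defined elsewhere in this file):
#   logfile = LogFile(open('mongod.log', 'r'))
#   logfile.fast_forward(some_start_datetime)  # seek close to the start time
#   print(len(logfile))                        # may trigger the full line scan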
| 35.755945 | 116 | 0.509573 |
7c26833e5360e6495c23a5b485ec7547b6bafa06 | 2,136 | py | Python | tests/svg.py | Tillsten/pyqtgraph | 0045863165fe526988c58cf4f8232ae2d261a5ee | [
"MIT"
] | null | null | null | tests/svg.py | Tillsten/pyqtgraph | 0045863165fe526988c58cf4f8232ae2d261a5ee | [
"MIT"
] | null | null | null | tests/svg.py | Tillsten/pyqtgraph | 0045863165fe526988c58cf4f8232ae2d261a5ee | [
"MIT"
] | null | null | null | """
SVG export test
"""
import test
import pyqtgraph as pg
app = pg.mkQApp()
if __name__ == '__main__':
test.unittest.main() | 30.514286 | 96 | 0.557116 |
7c26b3633189c7cbd7b00d1addad30f94587f9ec | 993 | py | Python | src/api/models/enums/apschedulerevents.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | 14 | 2020-12-19T15:06:13.000Z | 2022-01-12T19:52:17.000Z | src/api/models/enums/apschedulerevents.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | 43 | 2021-01-06T22:05:22.000Z | 2022-03-10T10:30:30.000Z | src/api/models/enums/apschedulerevents.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | 4 | 2020-12-18T23:10:09.000Z | 2021-04-02T13:03:12.000Z | EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0
EVENT_SCHEDULER_SHUTDOWN = 2 ** 1
EVENT_SCHEDULER_PAUSED = 2 ** 2
EVENT_SCHEDULER_RESUMED = 2 ** 3
EVENT_EXECUTOR_ADDED = 2 ** 4
EVENT_EXECUTOR_REMOVED = 2 ** 5
EVENT_JOBSTORE_ADDED = 2 ** 6
EVENT_JOBSTORE_REMOVED = 2 ** 7
EVENT_ALL_JOBS_REMOVED = 2 ** 8
EVENT_JOB_ADDED = 2 ** 9
EVENT_JOB_REMOVED = 2 ** 10
EVENT_JOB_MODIFIED = 2 ** 11
EVENT_JOB_EXECUTED = 2 ** 12
EVENT_JOB_ERROR = 2 ** 13
EVENT_JOB_MISSED = 2 ** 14
EVENT_JOB_SUBMITTED = 2 ** 15
EVENT_JOB_MAX_INSTANCES = 2 ** 16
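# Bitwise OR of every flag above; each event is a distinct power of two, so
# masks can be combined and tested independently.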
EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED |
EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED |
EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED |
EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES) | 45.136364 | 96 | 0.75428 |
7c272bc2beff83ce709b4ecff735eaf333a85378 | 25,166 | py | Python | scripts/build/build/targets.py | mrninhvn/matter | c577b233db9d2f3a6f87108a062b1699a40c5169 | [
"Apache-2.0"
] | 2 | 2022-03-29T12:17:41.000Z | 2022-03-30T13:25:20.000Z | scripts/build/build/targets.py | mrninhvn/matter | c577b233db9d2f3a6f87108a062b1699a40c5169 | [
"Apache-2.0"
] | null | null | null | scripts/build/build/targets.py | mrninhvn/matter | c577b233db9d2f3a6f87108a062b1699a40c5169 | [
"Apache-2.0"
] | 2 | 2022-02-24T15:42:39.000Z | 2022-03-04T20:38:07.000Z | # Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from itertools import combinations
from typing import List
from builders.ameba import AmebaApp, AmebaBoard, AmebaBuilder
from builders.android import AndroidApp, AndroidBoard, AndroidBuilder
from builders.cc13x2x7_26x2x7 import cc13x2x7_26x2x7App, cc13x2x7_26x2x7Builder
from builders.cyw30739 import Cyw30739App, Cyw30739Board, Cyw30739Builder
from builders.efr32 import Efr32App, Efr32Board, Efr32Builder
from builders.esp32 import Esp32App, Esp32Board, Esp32Builder
from builders.host import HostApp, HostBoard, HostBuilder
from builders.infineon import InfineonApp, InfineonBoard, InfineonBuilder
from builders.k32w import K32WApp, K32WBuilder
from builders.mbed import MbedApp, MbedBoard, MbedBuilder, MbedProfile
from builders.nrf import NrfApp, NrfBoard, NrfConnectBuilder
from builders.qpg import QpgApp, QpgBoard, QpgBuilder
from builders.telink import TelinkApp, TelinkBoard, TelinkBuilder
from builders.tizen import TizenApp, TizenBoard, TizenBuilder
from builders.bl602 import Bl602App, Bl602Board, Bl602Builder
from builders.imx import IMXApp, IMXBuilder
def HasConflicts(items: List[BuildVariant]) -> bool:
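    """Check if any two items in the given list conflict with each other."""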
for a, b in combinations(items, 2):
if (a.name in b.conflicts) or (b.name in a.conflicts):
return True
return False
def AllRequirementsMet(items: List[BuildVariant]) -> bool:
"""
Check that item.requires is satisfied for all items in the given list
"""
available = set([item.name for item in items])
for item in items:
for requirement in item.requires:
if requirement not in available:
return False
return True
ALL = []
target_generators = [
HostTargets(),
Esp32Targets(),
Efr32Targets(),
NrfTargets(),
AndroidTargets(),
MbedTargets(),
InfineonTargets(),
AmebaTargets(),
K32WTargets(),
cc13x2x7_26x2x7Targets(),
Cyw30739Targets(),
QorvoTargets(),
TizenTargets(),
Bl602Targets(),
IMXTargets(),
]
for generator in target_generators:
for target in generator:
ALL.append(target)
# Simple targets added one by one
ALL.append(Target('telink-tlsr9518adk80d-light', TelinkBuilder,
board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.LIGHT))
ALL.append(Target('telink-tlsr9518adk80d-light-switch', TelinkBuilder,
board=TelinkBoard.TLSR9518ADK80D, app=TelinkApp.SWITCH))
# have a consistent order overall
ALL.sort(key=lambda t: t.name)
| 42.510135 | 140 | 0.695581 |
7c279f6e16ec9934410f291dea61230ff38bf396 | 4,608 | py | Python | src/musegan/data.py | TRINITRONIC/musegan | 0a62e0303a8ff357d7f385dcc6edba76afb132b2 | [
"MIT"
] | null | null | null | src/musegan/data.py | TRINITRONIC/musegan | 0a62e0303a8ff357d7f385dcc6edba76afb132b2 | [
"MIT"
] | null | null | null | src/musegan/data.py | TRINITRONIC/musegan | 0a62e0303a8ff357d7f385dcc6edba76afb132b2 | [
"MIT"
] | null | null | null | """This file contains functions for loading and preprocessing pianoroll data.
"""
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from musegan.config import SHUFFLE_BUFFER_SIZE, PREFETCH_SIZE
LOGGER = logging.getLogger(__name__)
# --- Data loader --------------------------------------------------------------
def load_data_from_npy(filename):
"""Load and return the training data from a npy file."""
return np.load(filename)
def load_data_from_npz(filename):
"""Load and return the training data from a npz file (sparse format)."""
with np.load(filename) as f:
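        # 'nonzero' stores one index array per axis; fancy indexing with the
        # unpacked arrays rebuilds the dense boolean tensor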
data = np.zeros(f['shape'], np.bool_)
data[[x for x in f['nonzero']]] = True
return data
def load_data(data_source, data_filename):
"""Load and return the training data."""
if data_source == 'sa':
import SharedArray as sa
return sa.attach(data_filename)
if data_source == 'npy':
return load_data_from_npy(data_filename)
if data_source == 'npz':
return load_data_from_npz(data_filename)
raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
"But get " + str(data_source))
# --- Dataset Utilities -------------------------------------------------------
def random_transpose(pianoroll):
"""Randomly transpose a pianoroll with [-5, 6] semitones."""
semitone = np.random.randint(-5, 6)
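    # shift along the pitch axis for every track except track 0 (assumed to be
    # the drum track, which should not be pitch-shifted)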
if semitone > 0:
pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]
pianoroll[:, :semitone, 1:] = 0
elif semitone < 0:
pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]
pianoroll[:, semitone:, 1:] = 0
return pianoroll
def set_pianoroll_shape(pianoroll, data_shape):
"""Set the pianoroll shape and return the pianoroll."""
pianoroll.set_shape(data_shape)
return pianoroll
def set_label_shape(label):
"""Set the label shape and return the label."""
label.set_shape([1])
return label
# --- Sampler ------------------------------------------------------------------
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
"""Return some random samples of the training data."""
indices = np.random.choice(len(data), n_samples, False)
if np.issubdtype(data.dtype, np.bool_):
sample_data = data[indices] * 2. - 1.
else:
sample_data = data[indices]
if use_random_transpose:
sample_data = np.array([random_transpose(x) for x in sample_data])
if labels is None:
return sample_data
return sample_data, labels[indices]
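# Example call (hypothetical data array): draw 8 random, randomly transposed
# training samples:
#   samples = get_samples(8, data, use_random_transpose=True)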
# --- Tensorflow Dataset -------------------------------------------------------
def get_dataset(data, labels=None, batch_size=None, data_shape=None,
use_random_transpose=False, num_threads=1):
"""Create and return a tensorflow dataset from an array."""
if labels is None:
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data), tf.float32)
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll: tf.py_func(
random_transpose, [pianoroll], tf.float32),
num_parallel_calls=num_threads)
dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
pianoroll, data_shape), num_parallel_calls=num_threads)
else:
assert len(data) == len(labels), (
"Lengths of `data` and `lables` do not match.")
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data, labels), [tf.float32, tf.int32])
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll, label: (
tf.py_func(random_transpose, [pianoroll], tf.float32),
label),
num_parallel_calls=num_threads)
dataset = dataset.map(
lambda pianoroll, label: (set_pianoroll_shape(
pianoroll, data_shape), set_label_shape(label)),
num_parallel_calls=num_threads)
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
return dataset.prefetch(PREFETCH_SIZE)
| 39.724138 | 80 | 0.59809 |
7c2803d74bf17ec9bd8c3ff5ad734d4010f60546 | 20 | py | Python | Python/hello-world-pt-BR.py | PushpneetSingh/Hello-world | def0f44737e02fb40063cd347e93e456658e2532 | [
"MIT"
] | 1,428 | 2018-10-03T15:15:17.000Z | 2019-03-31T18:38:36.000Z | Python/hello-world-pt-BR.py | PushpneetSingh/Hello-world | def0f44737e02fb40063cd347e93e456658e2532 | [
"MIT"
] | 1,162 | 2018-10-03T15:05:49.000Z | 2018-10-18T14:17:52.000Z | Python/hello-world-pt-BR.py | PushpneetSingh/Hello-world | def0f44737e02fb40063cd347e93e456658e2532 | [
"MIT"
] | 3,909 | 2018-10-03T15:07:19.000Z | 2019-03-31T18:39:08.000Z | print(u"Olá mundo!") | 20 | 20 | 0.7 |
7c283d63bcdc25c314b3c41b483eb7c2c6064da2 | 527 | py | Python | 02-static-templates-files/02_html_template.py | saidulislam/flask-bootcamp-2 | 4ba8f5e012aa0159275ab264f0247815dcf635e6 | [
"Apache-2.0"
] | null | null | null | 02-static-templates-files/02_html_template.py | saidulislam/flask-bootcamp-2 | 4ba8f5e012aa0159275ab264f0247815dcf635e6 | [
"Apache-2.0"
] | null | null | null | 02-static-templates-files/02_html_template.py | saidulislam/flask-bootcamp-2 | 4ba8f5e012aa0159275ab264f0247815dcf635e6 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template
app = Flask(__name__)
if __name__ == "__main__":
app.run(debug=True) | 31 | 91 | 0.677419 |
7c28fc0563fc8f73fd257c1d3e24a953c2e9ec7c | 1,780 | py | Python | src/compas/datastructures/mesh/bbox.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | 2 | 2021-03-17T18:14:22.000Z | 2021-09-19T13:50:02.000Z | src/compas/datastructures/mesh/bbox.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | 9 | 2019-09-11T08:53:19.000Z | 2019-09-16T08:35:39.000Z | src/compas/datastructures/mesh/bbox.py | Licini/compas | 34f65adb3d0abc3f403312ffba62aa76f3376292 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import bounding_box
from compas.geometry import bounding_box_xy
__all__ = [
'mesh_bounding_box',
'mesh_bounding_box_xy',
]
def mesh_bounding_box(mesh):
"""Compute the (axis aligned) bounding box of a mesh.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
Returns
-------
list of point
The 8 corners of the bounding box of the mesh.
Examples
--------
>>> mesh_bounding_box(mesh)
[[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 10.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 10.0, 0.0], [0.0, 10.0, 0.0]]
"""
xyz = mesh.vertices_attributes('xyz', keys=list(mesh.vertices()))
return bounding_box(xyz)
def mesh_bounding_box_xy(mesh):
"""Compute the (axis aligned) bounding box of a projection of the mesh in the XY plane.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
Returns
-------
list of point
The 4 corners of the bounding polygon in the XY plane.
Examples
--------
>>> mesh_bounding_box_xy(mesh)
[[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 10.0, 0.0], [0.0, 10.0, 0.0]]
"""
xyz = mesh.vertices_attributes('xyz')
return bounding_box_xy(xyz)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
import doctest
import compas
from compas.datastructures import Mesh
mesh = Mesh.from_obj(compas.get('faces.obj'))
doctest.testmod()
| 23.733333 | 148 | 0.561236 |
7c2914b1e959a72c6f1d255196bf2c603b057db4 | 210 | py | Python | crop/source_selection/__init__.py | Lars-H/federated_crop | 8e936926462aa5df5a9b8e6b42b061a3623fddf4 | [
"MIT"
] | null | null | null | crop/source_selection/__init__.py | Lars-H/federated_crop | 8e936926462aa5df5a9b8e6b42b061a3623fddf4 | [
"MIT"
] | null | null | null | crop/source_selection/__init__.py | Lars-H/federated_crop | 8e936926462aa5df5a9b8e6b42b061a3623fddf4 | [
"MIT"
] | null | null | null | from naive import NaiveSourceSelection
from star_based import StarBasedSourceSelection
from utils import AskSourceSelector, HybridSourceSelector, StatSourceSelector
from charset_selector import CharSet_Selector | 52.5 | 77 | 0.909524 |
7c29df3316dce7638b4588f6021b4bc59ffb4cfc | 151 | py | Python | base3_plus.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | 2 | 2019-01-10T03:44:03.000Z | 2019-05-24T08:50:14.000Z | base3_plus.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | base3_plus.py | Mhaiyang/iccv | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | [
"MIT"
] | null | null | null | """
@Time : 201/21/19 10:47
@Author : TaylorMei
@Email : [email protected]
@Project : iccv
@File : base3_plus.py
@Function:
""" | 15.1 | 34 | 0.596026 |