max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
shynet/analytics/migrations/0004_auto_20210328_1514.py | f97/shynet | 1,904 | 11120686 | <gh_stars>1000+
# Generated by Django 3.1.7 on 2021-03-28 19:14
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("analytics", "0003_auto_20200502_1227"),
]
operations = [
migrations.AlterField(
model_name="hit",
name="last_seen",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name="hit",
name="start_time",
field=models.DateTimeField(
db_index=True, default=django.utils.timezone.now
),
),
migrations.AlterField(
model_name="session",
name="last_seen",
field=models.DateTimeField(
db_index=True, default=django.utils.timezone.now
),
),
migrations.AlterField(
model_name="session",
name="start_time",
field=models.DateTimeField(
db_index=True, default=django.utils.timezone.now
),
),
migrations.AddIndex(
model_name="session",
index=models.Index(
fields=["service", "-last_seen"], name="analytics_s_service_10bb96_idx"
),
),
]
|
core/src/epicli/tests/cli/engine/providers/test_provider_class_loader_aws.py | bikramlmsl/epiphany | 130 | 11120690 | from cli.engine.providers.provider_class_loader import provider_class_loader
from cli.engine.providers.aws.InfrastructureBuilder import InfrastructureBuilder
from cli.engine.providers.aws.APIProxy import APIProxy
from cli.engine.providers.aws.InfrastructureConfigCollector import InfrastructureConfigCollector
def test_provider_class_loader_infrastructurebuilder_aws():
infrastructure_builder = provider_class_loader('aws', 'InfrastructureBuilder')
assert infrastructure_builder is InfrastructureBuilder
def test_provider_class_loader_apiproxy_aws():
api_proxy = provider_class_loader('aws', 'APIProxy')
assert api_proxy is APIProxy
def test_provider_class_loader_infrastructureconfigcollector_aws():
infrastructure_config_collector = provider_class_loader('aws', 'InfrastructureConfigCollector')
assert infrastructure_config_collector is InfrastructureConfigCollector
|
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/20. swap bits in an integer.py | jaswinder9051998/Resources | 101 | 11120709 | <filename>Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/20. swap bits in an integer.py
"""
Write a function that accepts an integer and converts the integer into its binary form.
The function should then swap the two bits at positions 3 and 7 (from left) in the binary number and return the result (integer).
Example :
input = 40 (binary representation - '00101000' )
Expected output = 10 (binary representation - '00001010')
"""
def swap_bits(num):
    # bit positions counted from the right (0-indexed); bits 1 and 5 correspond
    # to positions 7 and 3 counted from the left of the 8-bit representation
    p = 1
    q = 5
if (((num & (1 << p)) >> p) ^ ((num & (1 << q)) >> q)) == 1:
num ^= (1 << p)
num ^= (1 << q)
return num
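# Editor's note: a small added sanity check for the exercise above; it simply
# verifies the documented example and that swapping the same bits twice
# restores the original value.
if __name__ == '__main__':
    assert swap_bits(40) == 10   # '00101000' -> '00001010'
    assert swap_bits(10) == 40   # swapping again restores 40
    print(swap_bits(40))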
|
external/rocksdb/tools/advisor/test/test_db_stats_fetcher.py | cashbitecrypto/cashbite | 12,278 | 11120726 | <reponame>cashbitecrypto/cashbite<filename>external/rocksdb/tools/advisor/test/test_db_stats_fetcher.py
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
from advisor.db_stats_fetcher import LogStatsParser, DatabasePerfContext
from advisor.db_timeseries_parser import NO_ENTITY
from advisor.rule_parser import Condition, TimeSeriesCondition
import os
import time
import unittest
from unittest.mock import MagicMock
class TestLogStatsParser(unittest.TestCase):
def setUp(self):
this_path = os.path.abspath(os.path.dirname(__file__))
stats_file = os.path.join(
this_path, 'input_files/log_stats_parser_keys_ts'
)
# populate the keys_ts dictionary of LogStatsParser
self.stats_dict = {NO_ENTITY: {}}
with open(stats_file, 'r') as fp:
for line in fp:
stat_name = line.split(':')[0].strip()
self.stats_dict[NO_ENTITY][stat_name] = {}
token_list = line.split(':')[1].strip().split(',')
for token in token_list:
timestamp = int(token.split()[0])
value = float(token.split()[1])
self.stats_dict[NO_ENTITY][stat_name][timestamp] = value
self.log_stats_parser = LogStatsParser('dummy_log_file', 20)
self.log_stats_parser.keys_ts = self.stats_dict
def test_check_and_trigger_conditions_bursty(self):
# mock fetch_timeseries() because 'keys_ts' has been pre-populated
self.log_stats_parser.fetch_timeseries = MagicMock()
# condition: bursty
cond1 = Condition('cond-1')
cond1 = TimeSeriesCondition.create(cond1)
cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
cond1.set_parameter('behavior', 'bursty')
cond1.set_parameter('window_sec', 40)
cond1.set_parameter('rate_threshold', 0)
self.log_stats_parser.check_and_trigger_conditions([cond1])
expected_cond_trigger = {
NO_ENTITY: {1530896440: 0.9767546362322214}
}
self.assertDictEqual(expected_cond_trigger, cond1.get_trigger())
# ensure that fetch_timeseries() was called once
self.log_stats_parser.fetch_timeseries.assert_called_once()
def test_check_and_trigger_conditions_eval_agg(self):
# mock fetch_timeseries() because 'keys_ts' has been pre-populated
self.log_stats_parser.fetch_timeseries = MagicMock()
# condition: evaluate_expression
cond1 = Condition('cond-1')
cond1 = TimeSeriesCondition.create(cond1)
cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
cond1.set_parameter('behavior', 'evaluate_expression')
keys = [
'rocksdb.manifest.file.sync.micros.p99',
'rocksdb.db.get.micros.p50'
]
cond1.set_parameter('keys', keys)
cond1.set_parameter('aggregation_op', 'latest')
# condition evaluates to FALSE
cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)>200')
self.log_stats_parser.check_and_trigger_conditions([cond1])
expected_cond_trigger = {NO_ENTITY: [1792.0, 15.9638]}
self.assertIsNone(cond1.get_trigger())
# condition evaluates to TRUE
cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)<200')
self.log_stats_parser.check_and_trigger_conditions([cond1])
expected_cond_trigger = {NO_ENTITY: [1792.0, 15.9638]}
self.assertDictEqual(expected_cond_trigger, cond1.get_trigger())
# ensure that fetch_timeseries() was called
self.log_stats_parser.fetch_timeseries.assert_called()
def test_check_and_trigger_conditions_eval(self):
# mock fetch_timeseries() because 'keys_ts' has been pre-populated
self.log_stats_parser.fetch_timeseries = MagicMock()
# condition: evaluate_expression
cond1 = Condition('cond-1')
cond1 = TimeSeriesCondition.create(cond1)
cond1.set_parameter('keys', 'rocksdb.db.get.micros.p50')
cond1.set_parameter('behavior', 'evaluate_expression')
keys = [
'rocksdb.manifest.file.sync.micros.p99',
'rocksdb.db.get.micros.p50'
]
cond1.set_parameter('keys', keys)
cond1.set_parameter('evaluate', 'keys[0]-(keys[1]*100)>500')
self.log_stats_parser.check_and_trigger_conditions([cond1])
expected_trigger = {NO_ENTITY: {
1530896414: [9938.0, 16.31508],
1530896440: [9938.0, 16.346602],
1530896466: [9938.0, 16.284669],
1530896492: [9938.0, 16.16005]
}}
self.assertDictEqual(expected_trigger, cond1.get_trigger())
self.log_stats_parser.fetch_timeseries.assert_called_once()
class TestDatabasePerfContext(unittest.TestCase):
def test_unaccumulate_metrics(self):
perf_dict = {
"user_key_comparison_count": 675903942,
"block_cache_hit_count": 830086,
}
timestamp = int(time.time())
perf_ts = {}
for key in perf_dict:
perf_ts[key] = {}
start_val = perf_dict[key]
for ix in range(5):
perf_ts[key][timestamp+(ix*10)] = start_val + (2 * ix * ix)
db_perf_context = DatabasePerfContext(perf_ts, 10, True)
timestamps = [timestamp+(ix*10) for ix in range(1, 5, 1)]
values = [val for val in range(2, 15, 4)]
inner_dict = {timestamps[ix]: values[ix] for ix in range(4)}
expected_keys_ts = {NO_ENTITY: {
'user_key_comparison_count': inner_dict,
'block_cache_hit_count': inner_dict
}}
self.assertDictEqual(expected_keys_ts, db_perf_context.keys_ts)
|
tfprof/server/tfprof.py | alexbriskin/taskflow | 3,457 | 11120736 | <reponame>alexbriskin/taskflow
#!/usr/bin/env python3
# program: tfprof
import logging as logger
import time
import sys
import json
import argparse
import os
import subprocess
import requests
# run_tfprof (default)
# generate profiler data in taskflow profiler format
def run_tfprof(args):
args.output = os.path.abspath(args.output);
logger.info("profiling program \"" + ' '.join(args.program) + "\"")
## open the output file
with open(args.output, "w") as ofs:
ofs.write('[');
os.environ["TF_ENABLE_PROFILER"] = args.output;
## launch the program
prob = time.perf_counter();
subprocess.call(args.program);
proe = time.perf_counter();
logger.info(f"finished with {(proe - prob)*1000:0.2f} milliseconds");
logger.info(f"saved result to {args.output:s}");
    if args.port is None:
        return;
logger.info(f"sending the result to localhost:{args.port:d}");
# run_chrome (TODO)
# generate the profiler data in chrome tracing format
# main function
def main():
# configure logger
logger.basicConfig(
#format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
level=logger.DEBUG
)
# parse the input arguments
parser = argparse.ArgumentParser();
parser.add_argument(
'-o', '--output',
type=str,
help='file to save the result (default: output.tfp)',
default="output.tfp"
)
parser.add_argument(
'-p', '--port',
type=int,
help='port number of the profiler server (default: None)',
default=None
)
parser.add_argument(
'program',
nargs=argparse.REMAINDER,
help='program to profile (e.g., path/to/binary args)'
)
args = parser.parse_args();
    if len(args.program) == 0:
logger.error("no program specified");
sys.exit(1);
run_tfprof(args);
# main entry
if __name__ == "__main__":
main();
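# Editor's note: an assumed example invocation (flag names come from the
# argparse setup above; the profiled binary path is purely illustrative):
#   ./tfprof.py -o output.tfp -p 8080 path/to/binary --its-args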
|
alipay/aop/api/domain/AnttechBlockchainFinanceMylogisticfinsysContractApplyModel.py | antopen/alipay-sdk-python-all | 213 | 11120746 | <filename>alipay/aop/api/domain/AnttechBlockchainFinanceMylogisticfinsysContractApplyModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainFinanceMylogisticfinsysContractApplyModel(object):
def __init__(self):
self._contract_name = None
@property
def contract_name(self):
return self._contract_name
@contract_name.setter
def contract_name(self, value):
self._contract_name = value
def to_alipay_dict(self):
params = dict()
if self.contract_name:
if hasattr(self.contract_name, 'to_alipay_dict'):
params['contract_name'] = self.contract_name.to_alipay_dict()
else:
params['contract_name'] = self.contract_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechBlockchainFinanceMylogisticfinsysContractApplyModel()
if 'contract_name' in d:
o.contract_name = d['contract_name']
return o
|
Recognition-Algorithms/Recognition_using_NasNet/models/__init__.py | swapnilgarg7/Face-X | 175 | 11120762 | <filename>Recognition-Algorithms/Recognition_using_NasNet/models/__init__.py
from models.nasnet import * |
dmb/data/transforms/builder.py | jiaw-z/DenseMatchingBenchmark | 160 | 11120795 | <reponame>jiaw-z/DenseMatchingBenchmark
from . import transforms as T
def build_transforms(cfg, is_train=True):
return None
|
33. Python Programs/remove_duplicate.py | Ujjawalgupta42/Hacktoberfest2021-DSA | 225 | 11120813 | from typing import Optional

# Minimal singly-linked list node, as assumed by the LeetCode-style solution
# below; it is added here only so the snippet is importable on its own.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
prev = head
current = head
if head:
val = head.val
head = head.next
while (head != None):
if head.val == val:
prev.next = head.next
head = head.next
else:
val = head.val
prev = head
head = head.next
return current
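# Editor's note: a small added driver (not part of the original exercise) that
# builds 1->1->2->3->3 and prints the deduplicated values; `self` is unused in
# the method above, so None is passed in its place.
def _build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

if __name__ == '__main__':
    node = deleteDuplicates(None, _build([1, 1, 2, 3, 3]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # [1, 2, 3]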
|
factory-ai-vision/DevTools/utils_file.py | kaka-lin/azure-intelligent-edge-patterns | 176 | 11120816 | #!/usr/bin/env python
"""File Utilities
"""
import logging
import os
import subprocess
from logging import config
from logging_config import LOGGING_CONFIG_DEV
logger = logging.getLogger(__name__)
class FileContext:
"""File Context"""
def __init__(self, file):
self.path = os.path.realpath(file)
@property
def name(self):
return os.path.basename(self.path)
def __repr__(self):
return self.name.__repr__()
def __str__(self):
return self.name.__str__()
@property
def dir(self) -> str:
"""dir
Returns:
str: dir path
"""
return os.path.dirname(self.path)
@property
def git_root(self) -> str:
"""git_root
Returns:
str: git root path
"""
return (
subprocess.Popen(
["git", "rev-parse", "--show-toplevel"],
stdout=subprocess.PIPE,
cwd=self.dir,
)
.communicate()[0]
.rstrip()
.decode("utf-8")
)
def show(self):
"""show info"""
logger.info("Path: %s", self.path)
logger.info("Name: %s", self.name)
logger.info("Dir: %s", self.dir)
logger.info("Git: %s", self.git_root)
if __name__ == "__main__":
config.dictConfig(LOGGING_CONFIG_DEV)
fc = FileContext(__file__)
fc.show()
|
frontera/utils/add_seeds.py | buildfail/frontera | 1,267 | 11120863 | # -*- coding: utf-8 -*-
from frontera.core.manager import LocalFrontierManager
from frontera.settings import Settings
from frontera.logger.handlers import CONSOLE
from argparse import ArgumentParser
import logging
from logging.config import fileConfig
from os.path import exists
logger = logging.getLogger(__name__)
def run_add_seeds(settings, seeds_file):
fh = open(seeds_file, "rb")
logger.info("Starting local seeds addition from file %s", seeds_file)
manager = LocalFrontierManager.from_settings(settings)
manager.add_seeds(fh)
manager.stop()
manager.close()
logger.info("Seeds addition finished")
if __name__ == '__main__':
parser = ArgumentParser(description="Frontera local add seeds utility")
parser.add_argument('--config', type=str, required=True,
help='Settings module name, should be accessible by import')
parser.add_argument('--log-level', '-L', type=str, default='INFO',
help="Log level, for ex. DEBUG, INFO, WARN, ERROR, FATAL")
parser.add_argument('--seeds-file', type=str, required=True, help="Seeds file path")
args = parser.parse_args()
settings = Settings(module=args.config)
logging_config_path = settings.get("LOGGING_CONFIG")
if logging_config_path and exists(logging_config_path):
fileConfig(logging_config_path, disable_existing_loggers=False)
else:
logging.basicConfig(level=args.log_level)
logger.setLevel(args.log_level)
logger.addHandler(CONSOLE)
run_add_seeds(settings, args.seeds_file) |
test/__init__.py | logilab/rdflib-jsonld | 1,424 | 11120874 | from rdflib import plugin
from rdflib import serializer
from rdflib import parser
assert plugin
assert serializer
assert parser
import json
|
cellrank/external/kernels/__init__.py | WeilerP/cellrank | 172 | 11120902 | <gh_stars>100-1000
from cellrank.external.kernels._wot_kernel import WOTKernel
from cellrank.external.kernels._statot_kernel import StationaryOTKernel
|
4dev/style_check.py | joschu/c | 698 | 11120915 | <reponame>joschu/c
#!/usr/bin/env python
import cgt
for (name,val) in cgt.__dict__.iteritems():
if not name.startswith("_"):
if not val.__doc__:
print "API function %s requires docstring!"%name
for (name,val) in cgt.core.__dict__.iteritems():
if isinstance(val, type) and issubclass(val, cgt.core.Op):
if val.get_native_compile_info == cgt.core.Op.get_native_compile_info:
print "Op %s is missing 'get_native_compile_info'!"%name
|
lulu/extractors/ifeng.py | fakegit/Lulu | 922 | 11120926 | <gh_stars>100-1000
#!/usr/bin/env python
from html import unescape
from lulu.common import (
match1,
url_info,
print_info,
get_content,
download_urls,
playlist_not_supported,
)
__all__ = ['ifeng_download', 'ifeng_download_by_id']
site_info = '凤凰网 ifeng.com'
def ifeng_download_by_id(_id, title=None, info_only=False, **kwargs):
assert match1(
_id, r'([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})'
), _id
url = 'http://vxml.ifengimg.com/video_info_new/{}/{}/{}.xml'.format(
_id[-2], _id[-2:], _id
)
xml = get_content(url)
title = match1(xml, r'Name="([^"]+)"')
title = unescape(title)
url = match1(xml, r'VideoPlayUrl="([^"]+)"')
url = url.replace(
'http://wideo.ifeng.com/', 'http://ips.ifeng.com/wideo.ifeng.com/'
)
_, ext, size = url_info(url)
print_info(site_info, title, ext, size)
if not info_only:
download_urls([url], title, ext, size, **kwargs)
def ifeng_download(url, info_only=False, **kwargs):
# old pattern /uuid.shtml
# now it could be #uuid
_id = match1(
url,
r'([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})'
)
if _id:
return ifeng_download_by_id(_id, None, info_only=info_only, **kwargs)
html = get_content(url)
uuid_pattern = (
r'"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"'
)
_id = match1(
html,
r'var vid="([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-'
'[0-9a-f]{12})"'
)
if _id is None:
video_pattern = r'"vid"\s*:\s*' + uuid_pattern
_id = match1(html, video_pattern)
assert _id, "Can't find video info"
return ifeng_download_by_id(_id, None, info_only=info_only, **kwargs)
download = ifeng_download
download_playlist = playlist_not_supported(site_info)
|
2019/08/05/Flask-Praetorian Walkthrough A Library for API Security With JSON Web Tokens JWT/myapi/myapi/models.py | kenjitagawa/youtube_video_code | 492 | 11120944 | <filename>2019/08/05/Flask-Praetorian Walkthrough A Library for API Security With JSON Web Tokens JWT/myapi/myapi/models.py<gh_stars>100-1000
from .extensions import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50))
password = db.Column(db.Text)
@classmethod
def lookup(cls, username):
return cls.query.filter_by(username=username).one_or_none()
@classmethod
def identify(cls, id):
return cls.query.filter_by(id=id).one_or_none()
@property
def rolenames(self):
return []
@property
def identity(self):
return self.id |
backend/util/environment_loader.py | Purus/LaunchKitDocker | 2,341 | 11120952 | <filename>backend/util/environment_loader.py
#
# Copyright 2016 Cluster Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
import os
import types
class EnvironmentLoaderError(RuntimeError):
pass
class Environment(object):
def __init__(self, name, env_dict):
self.name = name
self._env_dict = env_dict
self._module = None
def keys(self):
return self._env_dict.keys()
def get_module(self):
if not self._module:
self._module = types.ModuleType(self.name)
self.annotate_module(self._module)
return self._module
def annotate_module(self, module):
for k,v in self._env_dict.items():
setattr(module, k, v)
def load_environments(basedir, default='default', source_replace_dict=None):
json_files = {}
for filename in glob.glob(os.path.join(basedir, '*.json')):
env = os.path.splitext(os.path.basename(filename))[0]
        with open(filename, 'r') as json_file:
content = json_file.read()
if source_replace_dict:
for k, v in source_replace_dict.items():
content = content.replace(k, v)
try:
json_files[env] = json.loads(content)
except ValueError as e:
raise EnvironmentLoaderError('Cannot parse %s.json! %r' % (env, e))
if default not in json_files:
raise EnvironmentLoaderError('Cannot find default %s! Choices: %s' % (default, json_files.keys()))
default_dict = json_files[default]
environments = {}
for environment_name, env_specific_dict in json_files.items():
merged_dict = default_dict.copy()
for setting in default_dict.keys():
if setting in env_specific_dict:
merged_dict[setting] = env_specific_dict[setting]
environments[environment_name] = Environment(environment_name, merged_dict)
return environments
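# Editor's note: a hypothetical usage sketch; the directory, environment names,
# and replacement token below are illustrative and not taken from this project.
#
#   envs = load_environments('config/environments', default='default',
#                            source_replace_dict={'__BASE_DIR__': '/srv/app'})
#   prod = envs['prod'].get_module()   # module-like object, one attribute per key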
|
test/pool-test.py | edisga/scalene | 3,952 | 11120990 | <gh_stars>1000+
import multiprocessing
pool = multiprocessing.Pool(processes=1)
pool.terminate()
|
junction/proposals/migrations/0003_auto_20150113_1401.py | theSage21/junction | 192 | 11120994 | <filename>junction/proposals/migrations/0003_auto_20150113_1401.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("proposals", "0002_auto_20150105_2220"),
]
operations = [
migrations.AlterField(
model_name="proposalsection",
name="conferences",
field=models.ManyToManyField(
to="conferences.Conference", related_name="proposal_sections"
),
preserve_default=True,
),
migrations.AlterField(
model_name="proposaltype",
name="conferences",
field=models.ManyToManyField(
to="conferences.Conference", related_name="proposal_types"
),
preserve_default=True,
),
]
|
String_or_Array/PairSum_is_X.py | Amanjakhetiya/Data_Structures_Algorithms_In_Python | 195 | 11121022 | <gh_stars>100-1000
# Find a pair of elements in the array with sum = x
"""
Method 1: If unsorted array
Time Complexity: O(n)
Space Complexity: O(n)
"""
def find_pair_unsorted(arr, x):
elem_set = set({})
# To store the indexes of both the elements
pair = [-1, -1]
for value in arr:
# if x - value has already been discovered in the array
# Pair found, return the values
if (x-value) in elem_set:
return x-value, value
# else add the current value in the elem_set
else:
elem_set.add(value)
return "Not found"
arr = [1, 4, 45, 6, 10, 8]
print('Unsorted array:', arr)
print('Pair with sum 16 in unsorted array:', find_pair_unsorted(arr, 16))
"""
Method 2: If array is sorted
Time Complexity: O(n)
Space Complexity: O(1)
"""
def find_pair_sorted(arr, x):
# initialize variables to the start and end of the array
l = 0
r = len(arr) - 1
while l < r:
pair_sum = arr[l] + arr[r]
# if pair is found
if pair_sum == x:
return arr[l], arr[r]
# if the pair sum is less than x go to the next bigger value from left
elif pair_sum < x:
l += 1
# if the pair sum is more than x go to the next lesser value from right
else:
r -= 1
# If pair not found
return "Not found"
arr = [2, 6, 10, 15, 18, 20, 23, 25]
print('Sorted array:', arr)
print('Pair with sum 28 in sorted array:', find_pair_sorted(arr, 28))
|
dags/ethereum_load_dag.py | saccodd/ethereum-etl-airflow | 204 | 11121026 | <reponame>saccodd/ethereum-etl-airflow<gh_stars>100-1000
from __future__ import print_function
import logging
from ethereumetl_airflow.build_load_dag import build_load_dag
from ethereumetl_airflow.build_load_dag_redshift import build_load_dag_redshift
from ethereumetl_airflow.variables import read_load_dag_vars
from ethereumetl_airflow.variables import read_load_dag_redshift_vars
from ethereumetl_airflow.variables import read_var
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
# Default is gcp
cloud_provider = read_var('cloud_provider', var_prefix=None, required=False, cloud_provider='gcp')
if cloud_provider == 'gcp':
# airflow DAG
DAG = build_load_dag(
dag_id='ethereum_load_dag',
chain='ethereum',
**read_load_dag_vars(
var_prefix='ethereum_',
schedule_interval='30 12 * * *'
)
)
elif cloud_provider == 'aws':
# airflow DAG
DAG = build_load_dag_redshift(
dag_id='ethereum_load_dag',
chain='ethereum',
**read_load_dag_redshift_vars(
var_prefix='ethereum_',
schedule_interval='30 1 * * *'
)
)
else:
raise ValueError('You must set a valid cloud_provider Airflow variable (gcp,aws)')
|
pipe-cli/src/utilities/pipeline_run_share_manager.py | AlfiyaRF/cloud-pipeline | 126 | 11121070 | <reponame>AlfiyaRF/cloud-pipeline<filename>pipe-cli/src/utilities/pipeline_run_share_manager.py
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import click
from prettytable import prettytable
from src.api.pipeline_run import PipelineRun
class PipelineRunShareManager(object):
def __init__(self):
pass
def get(self, run_id):
run = PipelineRun.get(run_id)
if not run:
raise RuntimeError("Failed to load run '%s'" % str(run_id))
if not run.run_sids or len(run.run_sids) == 0:
click.echo("Not shared (use 'pipe share add' to configure)")
return
self._check_run_is_running(run)
table = prettytable.PrettyTable()
table.field_names = ["User/group", "SSH shared"]
table.align = "l"
table.header = True
for sid in run.run_sids:
table.add_row([sid.name, '+' if sid.access_type == 'SSH' else ''])
click.echo(table)
def add(self, run_id, users, groups, ssh):
run = PipelineRun.get(run_id)
if not run:
click.echo("Failed to load run '%s'" % str(run_id), err=True)
sys.exit(1)
if not users and not groups or len(users) == 0 and len(groups) == 0:
click.echo("Users or groups must be specified", err=True)
sys.exit(1)
self._check_run_is_running(run)
if not run.endpoints and not ssh:
click.echo("Run doesn't have endpoints. Please, specify '-ssh' option to share ssh.", err=True)
sys.exit(1)
existing_users, existing_groups = self._get_existing_sids(run, run_id)
self._add_sids(users, existing_users, run_id, ssh, True)
self._add_sids(groups, existing_groups, run_id, ssh, False)
        result = PipelineRun.update_run_sids(run_id, list(existing_users.values()) + list(existing_groups.values()))
if not result:
click.echo("Failed to share run '%s'" % str(run_id), err=True)
sys.exit(1)
click.echo("Done")
def remove(self, run_id, users, groups, ssh):
run = PipelineRun.get(run_id)
if not run:
click.echo("Failed to load run '%s'" % str(run_id), err=True)
sys.exit(1)
self._check_run_is_running(run)
if not users and not groups or len(users) == 0 and len(groups) == 0:
sids_to_delete = list()
            click.echo("Run '%s' will be unshared for all users and groups" % str(run_id))
else:
existing_users, existing_groups = self._get_existing_sids(run, run_id)
self._delete_sids(users, existing_users, run_id, ssh, True, run)
self._delete_sids(groups, existing_groups, run_id, ssh, False, run)
sids_to_delete = self._filter_nulls(existing_users.values()) + self._filter_nulls(existing_groups.values())
result = PipelineRun.update_run_sids(run_id, sids_to_delete)
if not result:
click.echo("Failed to unshare run '%s'" % str(run_id), err=True)
sys.exit(1)
click.echo("Done")
@staticmethod
def _check_run_is_running(run):
if run.status != 'RUNNING':
click.echo("Run is not running", err=True)
sys.exit(1)
@staticmethod
def _to_json(name, is_principal, access_type, run_id):
return {
"name": name,
"runId": run_id,
"isPrincipal": is_principal,
"accessType": str(access_type).upper()
}
@staticmethod
def _model_to_json(sid_model, run_id):
return PipelineRunShareManager._to_json(sid_model.name, sid_model.is_principal, sid_model.access_type, run_id)
@staticmethod
def _determine_access_type(ssh):
return 'SSH' if ssh else 'ENDPOINT'
def _delete_sids(self, sids, existing_sids, run_id, ssh, is_principal, run):
if sids:
for sid in sids:
existing_sid = existing_sids.get(sid)
if not existing_sid:
click.echo("Run '%s' was not shared for user or group '%s'" % (str(run_id), sid))
continue
if ssh and run.endpoints:
existing_sids.update({sid: self._to_json(sid, is_principal, 'ENDPOINT', run_id)})
else:
existing_sids.update({sid: None})
click.echo("Run '%s' will be unshared for user or group '%s'" % (str(run_id), sid))
@staticmethod
def _filter_nulls(sids):
return [sid for sid in sids if sid is not None]
def _get_existing_sids(self, run, run_id):
existing_users = dict()
existing_groups = dict()
for sid in run.run_sids:
if sid.is_principal:
existing_users.update({sid.name: self._model_to_json(sid, run_id)})
else:
existing_groups.update({sid.name: self._model_to_json(sid, run_id)})
return existing_users, existing_groups
def _add_sids(self, sids, existing_sids, run_id, ssh, is_principal):
if sids:
for sid in sids:
existing_sids.update({sid: self._to_json(sid, is_principal, self._determine_access_type(ssh), run_id)})
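# Editor's note: a hypothetical usage sketch of the manager above; the run id
# and user name are made up, and in practice these values come from the
# 'pipe share' CLI options rather than direct calls.
#
#   manager = PipelineRunShareManager()
#   manager.add(12345, users=['jdoe'], groups=None, ssh=True)    # share run + SSH
#   manager.get(12345)                                           # print share table
#   manager.remove(12345, users=['jdoe'], groups=None, ssh=True) # unshare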
|
tests/test_model/test_head/test_mobilenet_v3_head.py | ZJCV/PyCls | 110 | 11121084 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
@date: 2020/12/30 下午9:42
@file: test_mobilenet_v3_head.py
@author: zj
@description:
"""
import torch
from zcls.model.heads.mobilenetv3_head import MobileNetV3Head
def test_mobilenet_v3_head():
data = torch.randn(1, 960, 7, 7)
model = MobileNetV3Head(
feature_dims=960,
inner_dims=1280,
num_classes=1000,
conv_layer=None,
act_layer=None
)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 1000)
if __name__ == '__main__':
test_mobilenet_v3_head()
|
vit/formatter/due_formatted.py | kinifwyne/vit | 179 | 11121110 | <filename>vit/formatter/due_formatted.py
from vit.formatter.due import Due
class DueFormatted(Due):
pass
|
src/cltk/corpora/lat/phi/file_utils.py | yelircaasi/cltk | 757 | 11121157 | <reponame>yelircaasi/cltk<filename>src/cltk/corpora/lat/phi/file_utils.py
"""Higher-level (i.e., user-friendly) functions for quickly reading
PHI5 data after it has been processed by ``TLGU()``.
"""
import os
import regex
from cltk.corpora.lat.phi.phi5_index import PHI5_INDEX, PHI5_WORKS_INDEX
from cltk.utils.file_operations import make_cltk_path
def phi5_plaintext_cleanup(text, rm_punctuation=False, rm_periods=False):
"""Remove and substitute post-processing for Latin PHI5 text.
TODO: Surely more junk to pull out. Please submit bugs!
TODO: This is a rather slow now, help in speeding up welcome.
"""
# This works OK, doesn't get some
# Note: rming all characters between {} and ()
remove_comp = regex.compile(
r"-\n|«|»|\<|\>|\.\.\.|‘|’|_|{.+?}|\(.+?\)|\(|\)|“|#|%|⚔|&|=|/|\\|〚|†|『|⚖|–|˘|⚕|☾|◌|◄|►|⌐|⌊|⌋|≈|∷|≈|∞|”|[0-9]"
)
text = remove_comp.sub("", text)
new_text = None
if rm_punctuation:
new_text = ""
punctuation = [",", ";", ":", '"', "'", "?", "-", "!", "*", "[", "]", "{", "}"]
if rm_periods:
punctuation += ["."]
for char in text:
# rm acute combining acute accents made by TLGU
# Could be caught by regex, tried and failed, not sure why
if bytes(char, "utf-8") == b"\xcc\x81":
pass
# second try at rming some punctuation; merge with above regex
elif char in punctuation:
pass
else:
new_text += char
if new_text:
text = new_text
# replace line breaks w/ space
replace_comp = regex.compile(r"\n")
text = replace_comp.sub(" ", text)
comp_space = regex.compile(r"\s+")
text = comp_space.sub(" ", text)
return text
def assemble_phi5_author_filepaths():
"""Reads PHI5 index and builds a list of absolute filepaths."""
plaintext_dir = make_cltk_path("lat/text/phi5/plaintext/")
filepaths = [os.path.join(plaintext_dir, x + ".TXT") for x in PHI5_INDEX]
return filepaths
def assemble_phi5_works_filepaths():
"""Reads PHI5 index and builds a list of absolute filepaths."""
plaintext_dir = make_cltk_path("lat/text/phi5/individual_works/")
all_filepaths = []
for author_code in PHI5_WORKS_INDEX:
author_data = PHI5_WORKS_INDEX[author_code]
works = author_data["works"]
for work in works:
f = os.path.join(plaintext_dir, author_code + ".TXT" + "-" + work + ".txt")
all_filepaths.append(f)
return all_filepaths
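# Editor's note: an added illustrative call of the cleanup helper above; the
# input string is invented, and the filepath helpers require a local PHI5
# corpus processed by TLGU, which is not assumed here.
if __name__ == '__main__':
    sample = 'arma {uirum}que cano, Troiae qui primus ab oris...'
    print(phi5_plaintext_cleanup(sample, rm_punctuation=True, rm_periods=False))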
|
python/dp/triangle.py | googege/algo-learn | 153 | 11121159 | <reponame>googege/algo-learn
# Minimum path sum in a triangle
import copy
from typing import List
class Solution:
def minimumTotal_1(self, triangle: List[List[int]]) -> int:
        dp = copy.deepcopy(triangle)  # deep copy so the input triangle is not mutated
for i in range(len(triangle) - 2, -1, -1):
for j in range(len(triangle[i])):
dp[i][j] = min(dp[i + 1][j], dp[i + 1][j + 1]) + triangle[i][j]
return dp[0][0]
    # Simplified version using a one-dimensional DP array
def minimumTotal_2(self, triangle: List[List[int]]) -> int:
dp = [0] * (len(triangle) + 1)
for i in range(len(triangle) - 1, -1, -1):
for j in range(len(triangle[i])):
dp[j] = min(dp[j], dp[j + 1]) + triangle[i][j]
return dp[0]
    # Reuse the triangle itself; no extra array is allocated
def minimumTotal_3(self, triangle: List[List[int]]) -> int:
for i in range(len(triangle) - 2, -1, -1):
for j in range(len(triangle[i])):
triangle[i][j] += min(triangle[i + 1][j + 1], triangle[i + 1][j])
return triangle[0][0]
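# Editor's note: an added quick check of the three variants on the classic
# LeetCode example triangle, whose minimum path sum is 2 + 3 + 5 + 1 = 11.
if __name__ == '__main__':
    t = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]
    s = Solution()
    print(s.minimumTotal_1([row[:] for row in t]))  # 11
    print(s.minimumTotal_2([row[:] for row in t]))  # 11
    print(s.minimumTotal_3([row[:] for row in t]))  # 11 (this variant mutates its argument)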
|
tests/resources/test_service_desk.py | Glushiator/jira | 1,639 | 11121167 | import logging
from time import sleep
import pytest
from tests.conftest import JiraTestCase, broken_test
LOGGER = logging.getLogger(__name__)
class JiraServiceDeskTests(JiraTestCase):
def setUp(self):
JiraTestCase.setUp(self)
if not self.jira.supports_service_desk():
pytest.skip("Skipping Service Desk not enabled")
try:
self.jira.delete_project(self.test_manager.project_sd)
except Exception:
LOGGER.warning("Failed to delete %s", self.test_manager.project_sd)
@broken_test(reason="Broken needs fixing")
def test_create_customer_request(self):
self.jira.create_project(
key=self.test_manager.project_sd,
name=self.test_manager.project_sd_name,
ptype="service_desk",
template_name="IT Service Desk",
)
service_desks = []
for _ in range(3):
service_desks = self.jira.service_desks()
if service_desks:
break
logging.warning("Service desk not reported...")
sleep(2)
self.assertTrue(service_desks, "No service desks were found!")
service_desk = service_desks[0]
        for _ in range(3):
            request_types = self.jira.request_types(service_desk)
            if request_types:
                break
            logging.warning("Service desk request_types not reported...")
            sleep(2)
self.assertTrue(request_types, "No request_types for service desk found!")
request = self.jira.create_customer_request(
dict(
serviceDeskId=service_desk.id,
requestTypeId=int(request_types[0].id),
requestFieldValues=dict(
summary="Ticket title here", description="Ticket body here"
),
)
)
self.assertEqual(request.fields.summary, "Ticket title here")
self.assertEqual(request.fields.description, "Ticket body here")
|
cross3d/classes/__init__.py | vedantirb/cross3d | 129 | 11121233 | <filename>cross3d/classes/__init__.py
##
# \namespace cross3d.classes
#
# \remarks [desc::commented]
#
# \author Mikeh
# \author <NAME>
# \date 06/08/11
#
from fcurve import FCurve
from exceptions import Exceptions
from dispatch import Dispatch
from clipboard import Clipboard
from valuerange import ValueRange
from framerange import FrameRange
from filesequence import FileSequence
from timecode import Timecode
from flipbook import FlipBook
|
python/paddle_fl/mpc/examples/logistic_with_mnist/train_fc_softmax.py | barrierye/PaddleFL | 379 | 11121252 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MNIST Demo: single fully connected layer with softmax cross entropy
"""
import sys
import os
import errno
import numpy as np
import time
import logging
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle_fl.mpc as pfl_mpc
from paddle_fl.mpc.data_utils.data_utils import get_datautils
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
mpc_protocol_name = 'aby3'
mpc_du = get_datautils(mpc_protocol_name)
role, server, port = sys.argv[1], sys.argv[2], sys.argv[3]
# modify host(localhost).
pfl_mpc.init(mpc_protocol_name, int(role), "localhost", server, int(port))
role = int(role)
# data preprocessing
BATCH_SIZE = 128
epoch_num = 1
x = pfl_mpc.data(name='x', shape=[BATCH_SIZE, 1, 28, 28], dtype='int64')
y = pfl_mpc.data(name='y', shape=[BATCH_SIZE, 10], dtype='int64')
fc_out = pfl_mpc.layers.fc(input=x, size=10)
cost, softmax = pfl_mpc.layers.softmax_with_cross_entropy(logits=fc_out,
label=y,
soft_label=True,
return_softmax=True)
infer_program = fluid.default_main_program().clone(for_test=False)
avg_loss = pfl_mpc.layers.mean(cost)
optimizer = pfl_mpc.optimizer.SGD(learning_rate=0.1)
optimizer.minimize(avg_loss)
# prepare train and test reader
mpc_data_dir = "./mpc_data/"
if not os.path.exists(mpc_data_dir):
raise ValueError("mpc_data_dir is not found. Please prepare encrypted data.")
# train_reader
feature_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_feature", id=role, shape=(1, 28, 28))
label_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_label", id=role, shape=(10,))
batch_feature = mpc_du.batch(feature_reader, BATCH_SIZE, drop_last=True)
batch_label = mpc_du.batch(label_reader, BATCH_SIZE, drop_last=True)
# test_reader
test_feature_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_test_feature", id=role, shape=(1, 28, 28))
test_label_reader = mpc_du.load_shares(mpc_data_dir + "mnist10_test_label", id=role, shape=(10,))
test_batch_feature = mpc_du.batch(test_feature_reader, BATCH_SIZE, drop_last=True)
test_batch_label = mpc_du.batch(test_label_reader, BATCH_SIZE, drop_last=True)
place = fluid.CPUPlace()
# async data loader
loader = fluid.io.DataLoader.from_generator(feed_list=[x, y], capacity=BATCH_SIZE)
batch_sample = paddle.reader.compose(batch_feature, batch_label)
loader.set_batch_generator(batch_sample, places=place)
test_loader = fluid.io.DataLoader.from_generator(feed_list=[x, y], capacity=BATCH_SIZE)
test_batch_sample = paddle.reader.compose(test_batch_feature, test_batch_label)
test_loader.set_batch_generator(test_batch_sample, places=place)
# infer
def infer():
"""
MPC infer
"""
mpc_infer_data_dir = "./mpc_infer_data/"
if not os.path.exists(mpc_infer_data_dir):
try:
os.mkdir(mpc_infer_data_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
prediction_file = mpc_infer_data_dir + "mnist_debug_prediction"
prediction_file_part = prediction_file + ".part{}".format(role)
if os.path.exists(prediction_file_part):
os.remove(prediction_file_part)
step = 0
start_time = time.time()
for sample in test_loader():
step += 1
prediction = exe.run(program=infer_program, feed=sample, fetch_list=[softmax])
with open(prediction_file_part, 'ab') as f:
f.write(np.array(prediction).tostring())
if step % 10 == 0:
end_time = time.time()
logger.info('MPC infer of step={}, cost time in seconds:{}'.format(step, (end_time - start_time)))
end_time = time.time()
logger.info('MPC infer time in seconds:{}'.format((end_time - start_time)))
# train
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
mpc_model_basedir = "./mpc_model/"
logger.info('MPC training start...')
for epoch_id in range(epoch_num):
step = 0
epoch_start_time = time.time()
for sample in loader():
step += 1
step_start_time = time.time()
results = exe.run(feed=sample, fetch_list=[softmax])
step_end_time = time.time()
if step % 100 == 0:
logger.info('MPC training of epoch_id={} step={}, cost time in seconds:{}'
.format(epoch_id, step, (step_end_time - step_start_time)))
# For each epoch: infer or save infer program
#infer()
mpc_model_dir = mpc_model_basedir + "epoch{}/party{}".format(epoch_id, role)
fluid.io.save_inference_model(dirname=mpc_model_dir,
feeded_var_names=["x", "y"],
target_vars=[softmax],
executor=exe,
main_program=infer_program,
model_filename="__model__")
epoch_end_time = time.time()
logger.info('MPC training of epoch_id={} batch_size={}, cost time in seconds:{}'
.format(epoch_num, BATCH_SIZE, (epoch_end_time - epoch_start_time)))
# infer
infer()
|
src/genie/libs/parser/iosxe/tests/ShowIpNatTranslations/cli/equal/golden_output_vrf_verbose_expected.py | balmasea/genieparser | 204 | 11121260 | <gh_stars>100-1000
expected_output = {
"vrf": {
"genie": {
"index": {
1: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "10.144.0.2",
"outside_local": "10.1.0.2",
"protocol": "any",
"time_left": "0:0:-1",
},
2: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "120.1.211",
"outside_local": "10.1.2.21",
"protocol": "any",
"time_left": "0:1:38",
},
3: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "120.1.212",
"outside_local": "10.1.2.22",
"protocol": "any",
"time_left": "0:1:56",
},
4: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "120.1.213",
"outside_local": "10.1.2.23",
"protocol": "any",
"time_left": "0:1:30",
},
5: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "120.1.214",
"outside_local": "10.1.2.24",
"protocol": "any",
"time_left": "0:1:54",
},
6: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "120.1.215",
"outside_local": "10.1.2.25",
"protocol": "any",
"time_left": "0:1:58",
},
7: {
"group_id": 0,
"inside_global": "---",
"inside_local": "---",
"outside_global": "120.1.216",
"outside_local": "10.1.2.26",
"protocol": "any",
"time_left": "0:1:30",
},
}
}
}
}
|
tests/test_users.py | hishamnajam/python-wordpress-xmlrpc | 218 | 11121285 | from nose.plugins.attrib import attr
from tests import WordPressTestCase
from wordpress_xmlrpc.methods import users
from wordpress_xmlrpc.wordpress import WordPressUser, WordPressBlog, WordPressAuthor
class TestUsers(WordPressTestCase):
@attr('users')
@attr('pycompat')
def test_user_repr(self):
user = WordPressUser()
repr(user)
@attr('users')
@attr('pycompat')
def test_author_repr(self):
author = WordPressAuthor()
repr(author)
@attr('users')
def test_get_user(self):
user = self.client.call(users.GetUser(self.userid))
self.assertTrue(isinstance(user, WordPressUser))
self.assertEqual(user.username, self.username)
@attr('users')
def test_get_users(self):
user_list = self.client.call(users.GetUsers())
self.assert_list_of_classes(user_list, WordPressUser)
found = False
for user in user_list:
if user.id == self.userid:
found = True
break
self.assertTrue(found)
@attr('users')
def test_get_profile(self):
user = self.client.call(users.GetProfile())
self.assertTrue(isinstance(user, WordPressUser))
self.assertEqual(user.username, self.username)
@attr('users')
def test_edit_profile(self):
user = self.client.call(users.GetProfile())
self.assertTrue(isinstance(user, WordPressUser))
old_first_name = user.first_name
new_first_name = '<NAME>'
user.first_name = new_first_name
result = self.client.call(users.EditProfile(user))
self.assertTrue(result)
# check that the value changed
user2 = self.client.call(users.GetProfile())
self.assertEqual(new_first_name, user2.first_name)
# cleanup
user.first_name = old_first_name
self.client.call(users.EditProfile(user))
@attr('users')
def test_get_user_blogs(self):
blogs = self.client.call(users.GetUsersBlogs())
self.assert_list_of_classes(blogs, WordPressBlog)
@attr('users')
def test_get_authors(self):
authors = self.client.call(users.GetAuthors())
self.assert_list_of_classes(authors, WordPressAuthor)
|
ast/testdata/func_star_arg.py | MaxTurchin/pycopy-lib | 126 | 11121293 | <gh_stars>100-1000
def foo(a, b, *c):
pass
# After vararg, only kwonly's
def merge(*iterables, key=None, reverse=False):
pass
|
scripts/eval/combined_demo.py | hansheng0512/LateTemporalModeling3DCNN | 144 | 11121303 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 11:49:54 2020
@author: esat
"""
import os, sys
import collections
import numpy as np
import cv2
import math
import random
import time
import argparse
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from numpy import linalg as LA
from sklearn.metrics import confusion_matrix
datasetFolder="../../datasets"
sys.path.insert(0, "../../")
import models
from VideoSpatialPrediction3D import VideoSpatialPrediction3D
from VideoSpatialPrediction3D_bert import VideoSpatialPrediction3D_bert
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
model_names = sorted(name for name in models.__dict__
if not name.startswith("__")
and callable(models.__dict__[name]))
dataset_names = sorted(name for name in datasets.__all__)
parser = argparse.ArgumentParser(description='PyTorch Two-Stream Action Recognition RGB Test Case')
parser.add_argument('--dataset', '-d', default='hmdb51',
choices=["ucf101", "hmdb51"],
help='dataset: ucf101 | hmdb51')
parser.add_argument('--arch_flow', '-a', metavar='ARCH', default='flow_resneXt3D64f101_bert10_FRMB',
choices=model_names)
parser.add_argument('--arch_rgb', '-b', metavar='ARCH', default='rgb_resneXt3D64f101_bert10_FRMB',
choices=model_names)
parser.add_argument('-s', '--split', default=1, type=int, metavar='S',
help='which split of data to work on (default: 1)')
parser.add_argument('-w', '--window', default=3, type=int, metavar='V',
help='validation file index (default: 3)')
parser.add_argument('-v', '--val', dest='window_val', action='store_true',
help='Window Validation Selection')
multiGPUTest=False
multiGPUTrain=False
ten_crop_enabled = True
multiple_clips_enabled = True
num_seg_rgb=16
num_seg_pose=16
num_seg_flow=16
len_flow=1
poseEnabled = False
num_seg_3D = 1
length_3D = 64
def buildModel(model_path,arch,num_categories):
global multiGPUTrain
if 'rgb' in arch:
model=models.__dict__[arch](modelPath='', num_classes=num_categories,length=num_seg_rgb)
elif 'flow' in arch:
model=models.__dict__[arch](modelPath='', num_classes=num_categories,length=num_seg_flow)
params = torch.load(model_path)
if multiGPUTest:
model=torch.nn.DataParallel(model)
new_dict={"module."+k: v for k, v in params['state_dict'].items()}
model.load_state_dict(new_dict)
elif multiGPUTrain:
new_dict = {k[7:]: v for k, v in params['state_dict'].items()}
model_dict=model.state_dict()
model_dict.update(new_dict)
model.load_state_dict(model_dict)
else:
model.load_state_dict(params['state_dict'])
model.cuda()
model.eval()
return model
def main():
global args
args = parser.parse_args()
modelLocationRGB="./checkpoint/"+args.dataset+"_"+args.arch_rgb+"_split"+str(args.split)
modelLocationFlow="./checkpoint/"+args.dataset+"_"+args.arch_flow+"_split"+str(args.split)
model_path_rgb = os.path.join('../../',modelLocationRGB,'model_best.pth.tar')
model_path_flow = os.path.join('../../',modelLocationFlow,'model_best.pth.tar')
if args.dataset=='ucf101':
frameFolderName = "ucf101_frames"
elif args.dataset=='hmdb51':
frameFolderName = "hmdb51_frames"
elif args.dataset=='window':
frameFolderName = "window_frames"
data_dir=os.path.join(datasetFolder,frameFolderName)
if '64f' in args.arch_rgb:
rgb_length=64
elif '32f' in args.arch_rgb:
rgb_length=32
elif '8f' in args.arch_rgb:
rgb_length=8
else:
rgb_length=16
if '64f' in args.arch_flow:
flow_length=64
elif '32f' in args.arch_flow:
flow_length=32
elif '8f' in args.arch_flow:
flow_length=8
else:
flow_length=16
if args.window_val:
val_fileName = "window%d.txt" %(args.window)
else:
val_fileName = "val_flow_split%d.txt" %(args.split)
rgb_extension = 'img_{0:05d}.jpg'
if 'ucf101' in args.dataset or 'window' in args.dataset:
flow_extension = 'flow_{0}_{1:05d}.jpg'
elif 'hmdb51' in args.dataset:
flow_extension = 'flow_{0}_{1:05d}'
val_file=os.path.join(datasetFolder,'settings',args.dataset,val_fileName)
start_frame = 0
if args.dataset=='ucf101':
num_categories = 101
elif args.dataset=='hmdb51':
num_categories = 51
elif args.dataset=='window':
num_categories = 3
model_start_time = time.time()
spatial_net = buildModel(model_path_rgb,args.arch_rgb,num_categories)
temporal_net = buildModel(model_path_flow,args.arch_flow,num_categories)
model_end_time = time.time()
model_time = model_end_time - model_start_time
print("Action recognition model is loaded in %4.4f seconds." % (model_time))
f_val = open(val_file, "r")
val_list = f_val.readlines()
print("we got %d test videos" % len(val_list))
line_id = 1
match_count = 0
match_count_top3 = 0
y_true=[]
y_pred=[]
timeList=[]
#result_list = []
for line in val_list:
line_info = line.split(" ")
clip_path = os.path.join(data_dir,line_info[0])
duration = int(line_info[1])
input_video_label = int(line_info[2])
start = time.time()
if not multiple_clips_enabled:
_ , spatial_result, _ = VideoSpatialPrediction3D_bert(
clip_path,
spatial_net,
num_categories,
args.arch_rgb,
start_frame,
duration,
num_seg=num_seg_3D ,
length = rgb_length,
extension = rgb_extension,
ten_crop = ten_crop_enabled)
_ , temporal_result, _ = VideoSpatialPrediction3D_bert(
clip_path,
temporal_net,
num_categories,
args.arch_flow,
start_frame,
0,
num_seg=num_seg_3D ,
length = flow_length,
extension = flow_extension,
ten_crop = ten_crop_enabled)
else:
_ , spatial_result, _ = VideoSpatialPrediction3D(
clip_path,
spatial_net,
num_categories,
args.arch_rgb,
start_frame,
duration,
length = rgb_length,
extension = rgb_extension,
ten_crop = ten_crop_enabled)
_ , temporal_result, _ = VideoSpatialPrediction3D(
clip_path,
temporal_net,
num_categories,
args.arch_flow,
start_frame,
0,
length = flow_length,
extension = flow_extension,
ten_crop = ten_crop_enabled)
end = time.time()
estimatedTime=end-start
timeList.append(estimatedTime)
spatial_result = spatial_result / LA.norm(spatial_result)
temporal_result = temporal_result / LA.norm(temporal_result)
combined_result = spatial_result + temporal_result
pred_index = np.argmax(combined_result)
top3 = combined_result.argsort()[::-1][:3]
print("Sample %d/%d: GT: %d, Prediction: %d" % (line_id, len(val_list), input_video_label, pred_index))
print("Estimated Time %0.4f" % estimatedTime)
print("------------------")
if pred_index == input_video_label:
match_count += 1
if input_video_label in top3:
match_count_top3 += 1
line_id += 1
y_true.append(input_video_label)
y_pred.append(pred_index)
print(confusion_matrix(y_true,y_pred))
print("Accuracy with mean calculation is %4.4f" % (float(match_count)/len(val_list)))
print("top3 accuracy %4.4f" % (float(match_count_top3)/len(val_list)))
print(modelLocationRGB)
print(modelLocationFlow)
print("Mean Estimated Time %0.4f" % (np.mean(timeList)))
if multiple_clips_enabled:
print('multiple clips')
else:
print('one clips')
if ten_crop_enabled:
print('10 crops')
else:
print('single crop')
resultDict={'y_true':y_true,'y_pred':y_pred}
np.save('results/%s.npy' %(args.dataset+'_'+args.arch_rgb+'_'+ args.arch_flow +"_split"+str(args.split)), resultDict)
if __name__ == "__main__":
main()
|
libs/dataclass_utils.py | phc-health/covid-data-model | 155 | 11121307 | <reponame>phc-health/covid-data-model
import dataclasses
# TODO(tom): Remove dataclass_with_default_init once we are using Python 3.9. See
# https://stackoverflow.com/a/58336722
def dataclass_with_default_init(_cls=None, *args, **kwargs):
def wrap(cls):
# Save the current __init__ and remove it so dataclass will
# create the default __init__.
user_init = getattr(cls, "__init__")
delattr(cls, "__init__")
# let dataclass process our class.
result = dataclasses.dataclass(cls, *args, **kwargs)
# Restore the user's __init__ save the default init to __default_init__.
setattr(result, "__default_init__", result.__init__)
setattr(result, "__init__", user_init)
# Just in case that dataclass will return a new instance,
# (currently, does not happen), restore cls's __init__.
if result is not cls:
setattr(cls, "__init__", user_init)
return result
# Support both dataclass_with_default_init() and dataclass_with_default_init
if _cls is None:
return wrap
else:
return wrap(_cls)
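# Editor's note: an illustrative use of the decorator above; the class and field
# names are hypothetical. The generated initializer is kept as __default_init__,
# so a hand-written __init__ can preprocess arguments and then delegate to it,
# e.g. _Example("3").value == 3.
@dataclass_with_default_init
class _Example:
    value: int

    def __init__(self, value=0):
        # normalize the input, then call the dataclass-generated initializer
        self.__default_init__(value=int(value))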
|
native_client_sdk/src/build_tools/nacl_sdk_scons/nacl_utils_test.py | Scopetta197/chromium | 212 | 11121352 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for nacl_utils.py."""
import fileinput
import mox
import nacl_utils
import os
import sys
import unittest
def TestMock(file_path, open_func):
temp_file = open_func(file_path)
temp_file.close()
class TestNaClUtils(unittest.TestCase):
"""Class for test cases to cover globally declared helper functions."""
def setUp(self):
self.script_dir = os.path.abspath(os.path.dirname(__file__))
self.mock_factory = mox.Mox()
self.InitializeResourceMocks()
def InitializeResourceMocks(self):
"""Can be called multiple times if multiple functions need to be tested."""
self.fileinput_mock = self.mock_factory.CreateMock(fileinput)
self.os_mock = self.mock_factory.CreateMock(os)
self.sys_mock = self.mock_factory.CreateMock(sys)
def testToolchainPath(self):
output = nacl_utils.ToolchainPath('nacl_sdk_root')
head, tail = os.path.split(output)
base, toolchain = os.path.split(head)
self.assertEqual('nacl_sdk_root', base)
self.assertEqual('toolchain', toolchain)
self.assertRaises(ValueError,
nacl_utils.ToolchainPath,
'nacl_sdk_root',
arch='nosucharch')
self.assertRaises(ValueError,
nacl_utils.ToolchainPath,
'nacl_sdk_root',
variant='nosuchvariant')
def testGetJSONFromNexeSpec(self):
valid_empty_json = '{\n "program": {\n }\n}\n'
null_json = nacl_utils.GetJSONFromNexeSpec(None)
self.assertEqual(null_json, valid_empty_json)
empty_json = nacl_utils.GetJSONFromNexeSpec({})
self.assertEqual(empty_json, valid_empty_json)
nexes = {'x86-32': 'nacl_x86_32.nexe',
'x86-64': 'nacl_x86_64.nexe',
'arm': 'nacl_ARM.nexe'}
json = nacl_utils.GetJSONFromNexeSpec(nexes)
# Assert that the resulting JSON has all the right parts: the "nexes"
# dict, followed by one entry for each architecture. Also make sure that
# the last entry doesn't have a trailing ','
json_lines = json.splitlines()
self.assertEqual(len(json_lines), 7)
self.assertEqual(json_lines[0], '{')
self.assertEqual(json_lines[1], ' "program": {')
self.assertTrue(json_lines[2].endswith(','))
self.assertTrue(json_lines[3].endswith(','))
self.assertFalse(json_lines[4].endswith(','))
self.assertEqual(json_lines[5], ' }')
self.assertEqual(json_lines[6], '}')
# Assert that the key-value pair lines have the right form. The order
# of the keys doesn't matter. Note that the key values are enclosed in
# "" (e.g. "x86-32") - this is intentional.
valid_arch_keys = ['"x86-32"', '"x86-64"', '"arm"']
for line in json_lines[2:4]:
key_value = line.split(':')
self.assertEqual(len(key_value), 3)
self.assertTrue(key_value[0].lstrip().rstrip() in valid_arch_keys)
def testGenerateNmf(self):
# Assert that failure cases properly fail.
self.assertRaises(ValueError, nacl_utils.GenerateNmf, None, None, None)
self.assertRaises(ValueError, nacl_utils.GenerateNmf, [], [], {})
def testGetArchFromSpec(self):
default_arch, default_subarch = nacl_utils.GetArchFromSpec(None)
self.assertEqual(default_arch, nacl_utils.DEFAULT_ARCH)
self.assertEqual(default_subarch, nacl_utils.DEFAULT_SUBARCH)
default_arch, subarch = nacl_utils.GetArchFromSpec({'subarch': '64'})
self.assertEqual(default_arch, nacl_utils.DEFAULT_ARCH)
self.assertEqual(subarch, '64')
arch, default_subarch = nacl_utils.GetArchFromSpec({'arch': 'x86'})
self.assertEqual(arch, 'x86')
self.assertEqual(default_subarch, nacl_utils.DEFAULT_SUBARCH)
arch, subarch = nacl_utils.GetArchFromSpec({'arch': 'x86', 'subarch': '64'})
self.assertEqual(arch, 'x86')
self.assertEqual(subarch, '64')
def RunTests():
return_value = 1
test_suite = unittest.TestLoader().loadTestsFromTestCase(TestNaClUtils)
test_results = unittest.TextTestRunner(verbosity=2).run(test_suite)
if test_results.wasSuccessful():
return_value = 0
return return_value
if __name__ == '__main__':
sys.exit(RunTests())
|
runway/templates/sls-py/__init__.py | paul-duffy/runway | 134 | 11121364 | <reponame>paul-duffy/runway
"""Empty file for python import traversal.""" # pylint: disable=all
|
samples/bulk_update.py | oniram22/orionsdk-python | 177 | 11121369 | <reponame>oniram22/orionsdk-python
import requests
from orionsdk import SwisClient
npm_server = 'localhost'
username = 'admin'
password = ''
verify = False
if not verify:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
swis = SwisClient(npm_server, username, password)
# select the top 3 nodes from the inventory
results = swis.query("SELECT TOP 3 N.CustomProperties.Uri FROM Orion.Nodes N")
# extract just the Uris from the results
uris = [row['Uri'] for row in results['results']]
# submit the request
swis.bulkupdate(uris, City='Austin', DeviceType='Router', Department='Billing')
|
siliconcompiler/core.py | siliconcompiler/siliconcompiler | 424 | 11121401 | # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import argparse
import base64
import time
import datetime
import multiprocessing
import tarfile
import traceback
import asyncio
from subprocess import run, PIPE
import os
import glob
import pathlib
import sys
import gzip
import re
import json
import logging
import hashlib
import shutil
import copy
import importlib
import textwrap
import math
import pandas
import yaml
import graphviz
import time
import uuid
import shlex
import platform
import getpass
import csv
import distro
import netifaces
import webbrowser
import packaging.version
import packaging.specifiers
from jinja2 import Environment, FileSystemLoader
from pathlib import Path
from timeit import default_timer as timer
from siliconcompiler.client import *
from siliconcompiler.schema import *
from siliconcompiler.scheduler import _deferstep
from siliconcompiler import leflib
from siliconcompiler import utils
from siliconcompiler import _metadata
import psutil
class TaskStatus():
# Could use Python 'enum' class here, but that doesn't work nicely with
# schema.
PENDING = 'pending'
SUCCESS = 'success'
ERROR = 'error'
class Chip:
"""Object for configuring and executing hardware design flows.
This is the main object used for configuration, data, and
execution within the SiliconCompiler platform.
Args:
design (string): Name of the top level chip design module.
Examples:
>>> siliconcompiler.Chip(design="top")
Creates a chip object with name "top".
"""
###########################################################################
def __init__(self, design, loglevel=None):
# version numbers
self.scversion = _metadata.version
self.schemaversion = SCHEMA_VERSION
# Local variables
self.scroot = os.path.dirname(os.path.abspath(__file__))
self.cwd = os.getcwd()
self.error = 0
self.cfg = schema_cfg()
# The 'status' dictionary can be used to store ephemeral config values.
# Its contents will not be saved, and can be set by parent scripts
# such as a web server or supervisor process. Currently supported keys:
# * 'jobhash': A hash or UUID which can identify jobs in a larger system.
# * 'remote_cfg': Dictionary containing remote server configurations
# (address, credentials, etc.)
# * 'slurm_account': User account ID in a connected slurm HPC cluster.
# * 'slurm_partition': Name of the partition in which a task should run
# on a connected slurm HPC cluster.
# * 'watchdog': Activity-monitoring semaphore for jobs scheduled on an
# HPC cluster; expects a 'threading.Event'-like object.
# * 'max_fs_bytes': A limit on how much disk space a job is allowed
# to consume in a connected HPC cluster's storage.
self.status = {}
self.builtin = ['minimum','maximum',
'nop', 'mux', 'join', 'verify']
# We set 'design' and 'loglevel' directly in the config dictionary
# because of a chicken-and-egg problem: self.set() relies on the logger,
# but the logger relies on these values.
self.cfg['design']['value'] = design
if loglevel:
self.cfg['option']['loglevel']['value'] = loglevel
self._init_logger()
self._loaded_modules = {
'flows': [],
'pdks': [],
'libs': [],
'checklists': []
}
###########################################################################
@property
def design(self):
'''Design name of chip object.
This is an immutable property.'''
return self.get('design')
###########################################################################
def _init_logger(self, step=None, index=None, in_run=False):
self.logger = logging.getLogger(uuid.uuid4().hex)
# Don't propagate log messages to "root" handler (we get duplicate
# messages without this)
# TODO: this prevents us from being able to capture logs with pytest:
# we should revisit it
self.logger.propagate = False
loglevel = self.get('option', 'loglevel')
if loglevel=='DEBUG':
prefix = '| %(levelname)-7s | %(funcName)-10s | %(lineno)-4s'
else:
prefix = '| %(levelname)-7s'
if in_run:
flow = self.get('option', 'flow')
# Figure out how wide to make step and index fields
max_step_len = 2
max_index_len = 2
for future_step in self.getkeys('flowgraph', flow):
max_step_len = max(len(future_step) + 1, max_step_len)
for future_index in self.getkeys('flowgraph', flow, future_step):
max_index_len = max(len(future_index) + 1, max_index_len)
jobname = self.get('option', 'jobname')
if step is None:
step = '-' * max(max_step_len // 4, 1)
if index is None:
index = '-' * max(max_index_len // 4, 1)
run_info = f'%s | %-{max_step_len}s | %-{max_index_len}s' % (jobname, step, index)
logformat = ' | '.join([prefix, run_info, '%(message)s'])
else:
logformat = ' | '.join([prefix, '%(message)s'])
handler = logging.StreamHandler()
formatter = logging.Formatter(logformat)
handler.setFormatter(formatter)
# Clear any existing handlers so we don't end up with duplicate messages
# if repeat calls to _init_logger are made
if len(self.logger.handlers) > 0:
self.logger.handlers.clear()
self.logger.addHandler(handler)
self.logger.setLevel(loglevel)
###########################################################################
def _deinit_logger(self):
self.logger = None
###########################################################################
def _get_switches(self, *keypath):
'''Helper function for parsing switches and metavars for a keypath.'''
#Switch field fully describes switch format
switch = self.get(*keypath, field='switch')
if switch is None:
switches = []
elif isinstance(switch, list):
switches = switch
else:
switches = [switch]
switchstrs = []
# parse out switch from metavar
# TODO: should we validate that metavar matches for each switch?
for switch in switches:
switchmatch = re.match(r'(-[\w_]+)\s+(.*)', switch)
gccmatch = re.match(r'(-[\w_]+)(.*)', switch)
plusmatch = re.match(r'(\+[\w_\+]+)(.*)', switch)
if switchmatch:
switchstr = switchmatch.group(1)
metavar = switchmatch.group(2)
elif gccmatch:
switchstr = gccmatch.group(1)
metavar = gccmatch.group(2)
elif plusmatch:
switchstr = plusmatch.group(1)
metavar = plusmatch.group(2)
switchstrs.append(switchstr)
return switchstrs, metavar
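    # Illustrative note (not from the original source): given a schema switch
    # field such as "-loglevel <str>", _get_switches() is expected to return
    # (['-loglevel'], '<str>'), while a plusarg spec like "+incdir+<dir>"
    # would yield (['+incdir+'], '<dir>'). These are assumed examples of the
    # parsing above, not values taken from the schema.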
###########################################################################
def create_cmdline(self, progname, description=None, switchlist=None, input_map=None):
"""Creates an SC command line interface.
Exposes parameters in the SC schema as command line switches,
simplifying creation of SC apps with a restricted set of schema
parameters exposed at the command line. The order of command
line switch settings parsed from the command line is as follows:
1. loglevel
2. fpga_partname
3. load_target('target')
4. read_manifest([cfg])
5. all other switches
The cmdline interface is implemented using the Python argparse package
and the following use restrictions apply.
* Help is accessed with the '-h' switch.
* Arguments that include spaces must be enclosed with double quotes.
        * List parameters are entered individually. (i.e. -y libdir1 -y libdir2)
* For parameters with Boolean types, the switch implies "true".
* Special characters (such as '-') must be enclosed in double quotes.
* Compiler compatible switches include: -D, -I, -O{0,1,2,3}
* Verilog legacy switch formats are supported: +libext+, +incdir+
Args:
progname (str): Name of program to be executed.
description (str): Short program description.
switchlist (list of str): List of SC parameter switches to expose
at the command line. By default all SC schema switches are
available. Parameter switches should be entered based on the
parameter 'switch' field in the schema. For parameters with
multiple switches, both will be accepted if any one is included
in this list.
input_map (dict of str): Dictionary mapping file extensions to input
filetypes. This is used to automatically assign positional
source arguments to ['input', ...] keypaths based on their file
extension. If None, the CLI will not accept positional source
arguments.
Examples:
>>> chip.create_cmdline(progname='sc-show',switchlist=['-input','-cfg'])
Creates a command line interface for 'sc-show' app.
>>> chip.create_cmdline(progname='sc', input_map={'v': 'verilog'})
All sources ending in .v will be stored in ['input', 'verilog']
"""
# Argparse
parser = argparse.ArgumentParser(prog=progname,
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description)
# Get all keys from global dictionary or override at command line
allkeys = self.getkeys()
# Iterate over all keys to add parser arguments
for keypath in allkeys:
#Fetch fields from leaf cell
helpstr = self.get(*keypath, field='shorthelp')
typestr = self.get(*keypath, field='type')
            # argparse 'dest' must be a string, so join keypath with underscores
dest = '_'.join(keypath)
switchstrs, metavar = self._get_switches(*keypath)
# Three switch types (bool, list, scalar)
if not switchlist or any(switch in switchlist for switch in switchstrs):
if typestr == 'bool':
parser.add_argument(*switchstrs,
nargs='?',
metavar=metavar,
dest=dest,
const='true',
help=helpstr,
default=argparse.SUPPRESS)
#list type arguments
elif re.match(r'\[', typestr):
#all the rest
parser.add_argument(*switchstrs,
metavar=metavar,
dest=dest,
action='append',
help=helpstr,
default=argparse.SUPPRESS)
else:
#all the rest
parser.add_argument(*switchstrs,
metavar=metavar,
dest=dest,
help=helpstr,
default=argparse.SUPPRESS)
if input_map is not None:
parser.add_argument('source',
nargs='*',
help='Input files with filetype inferred by extension')
#Preprocess sys.argv to enable linux commandline switch formats
#(gcc, verilator, etc)
scargs = []
# Iterate from index 1, otherwise we end up with script name as a
# 'source' positional argument
for item in sys.argv[1:]:
#Split switches with one character and a number after (O0,O1,O2)
opt = re.match(r'(\-\w)(\d+)', item)
#Split assign switches (-DCFG_ASIC=1)
assign = re.search(r'(\-\w)(\w+\=\w+)', item)
#Split plusargs (+incdir+/path)
plusarg = re.search(r'(\+\w+\+)(.*)', item)
if opt:
scargs.append(opt.group(1))
scargs.append(opt.group(2))
elif plusarg:
scargs.append(plusarg.group(1))
scargs.append(plusarg.group(2))
elif assign:
scargs.append(assign.group(1))
scargs.append(assign.group(2))
else:
scargs.append(item)
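        # Illustrative examples of the preprocessing above (assumed inputs,
        # not part of the original source):
        #   '-O3'           -> ['-O', '3']
        #   '-DCFG_ASIC=1'  -> ['-D', 'CFG_ASIC=1']
        #   '+incdir+./rtl' -> ['+incdir+', './rtl']
        # Anything that doesn't match these patterns is passed through as-is.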
parser.add_argument('-version', action='version', version=_metadata.version)
#Grab argument from pre-process sysargs
cmdargs = vars(parser.parse_args(scargs))
# Print banner
print(_metadata.banner)
print("Authors:", ", ".join(_metadata.authors))
print("Version:", _metadata.version, "\n")
print("-"*80)
os.environ["COLUMNS"] = '80'
# 1. set loglevel if set at command line
if 'option_loglevel' in cmdargs.keys():
self.logger.setLevel(cmdargs['option_loglevel'])
# 2. read in target if set
if 'option_target' in cmdargs.keys():
if 'arg_pdk' in cmdargs.keys():
raise NotImplementedError("NOT IMPLEMENTED: ['arg', 'pdk'] parameter with target")
if 'arg_flow' in cmdargs.keys():
raise NotImplementedError("NOT IMPLEMENTED: ['arg', 'flow'] parameter with target")
if 'fpga_partname' in cmdargs.keys():
self.set('fpga', 'partname', cmdargs['fpga_partname'], clobber=True)
# running target command
self.load_target(cmdargs['option_target'])
# 4. read in all cfg files
if 'option_cfg' in cmdargs.keys():
for item in cmdargs['option_cfg']:
self.read_manifest(item, clobber=True, clear=True)
# Map sources to ['input'] keypath.
if 'source' in cmdargs:
for source in cmdargs['source']:
_, ext = os.path.splitext(source)
ext = ext.lstrip('.')
if ext in input_map:
filetype = input_map[ext]
if self.valid('input', filetype, quiet=True):
self.add('input', filetype, source)
else:
self.set('input', filetype, source)
self.logger.info(f'Source {source} inferred as {filetype}')
else:
self.logger.warning('Unable to infer input type for '
f'{source} based on file extension, ignoring. Use the '
'-input flag to provide it explicitly.')
# we don't want to handle this in the next loop
del cmdargs['source']
# 5. Cycle through all command args and write to manifest
for dest, vals in cmdargs.items():
keypath = dest.split('_')
# Turn everything into a list for uniformity
if not isinstance(vals, list):
vals = [vals]
# Cycle through all items
for item in vals:
# Hack to handle the fact that we want optmode stored with an 'O'
# prefix.
if keypath == ['option', 'optmode']:
item = 'O' + item
num_free_keys = keypath.count('default')
if len(item.split(' ')) < num_free_keys + 1:
# Error out if value provided doesn't have enough words to
# fill in 'default' keys.
switches, metavar = self._get_switches(*keypath)
switchstr = '/'.join(switches)
self.logger.error(f'Invalid value {item} for switch {switchstr}. Expected format {metavar}.')
raise SiliconCompilerError('Invalid CLI arguments')
# We replace 'default' in keypath with first N words in provided
# value. Remainder is the actual value we want to store in the
# parameter.
*free_keys, val = item.split(' ', num_free_keys)
args = [free_keys.pop(0) if key == 'default' else key for key in keypath]
# Storing in manifest
self.logger.info(f"Command line argument entered: {args} Value: {val}")
typestr = self.get(*keypath, field='type')
if typestr.startswith('['):
if self.valid(*args, quiet=True):
self.add(*args, val)
else:
self.set(*args, val, clobber=True)
else:
self.set(*args, val, clobber=True)
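    # Illustrative note (not from the original source): for keypaths that
    # contain 'default' entries, the command-line value supplies the free keys
    # first. As a hypothetical sketch, a switch bound to a keypath with one
    # 'default' slot would accept "-switch 'mykey myvalue'", storing 'myvalue'
    # at the keypath with 'default' replaced by 'mykey'.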
#########################################################################
def find_function(self, modulename, funcname, moduletype=None):
'''
Returns a function attribute from a module on disk.
Searches the SC root directory and the 'scpath' parameter for the
modulename provided and imports the module if found. If the funcname
provided is found in the module, a callable function attribute is
returned, otherwise None is returned.
The function assumes the following directory structure:
* tools/modulename/modulename.py
* flows/modulename.py
        * pdks/modulename.py
        If the moduletype is None, the module paths are searched in the
        order: 'targets'->'flows'->'tools'->'pdks'->'libs'->'checklists'.
Supported functions include:
* targets (make_docs, setup)
* pdks (make_docs, setup)
* flows (make_docs, setup)
* tools (make_docs, setup, check_version, runtime_options,
pre_process, post_process)
* libs (make_docs, setup)
Args:
modulename (str): Name of module to import.
funcname (str): Name of the function to find within the module.
moduletype (str): Type of module (flows, pdks, libs, checklists, targets).
Examples:
>>> setup_pdk = chip.find_function('freepdk45', 'setup', 'pdks')
>>> setup_pdk()
Imports the freepdk45 module and runs the setup_pdk function
'''
# module search path depends on modtype
if moduletype is None:
            for item in ('targets', 'flows', 'tools', 'pdks', 'libs', 'checklists'):
                func = self.find_function(modulename, funcname, moduletype=item)
                if func is not None:
                    return func
            self.logger.error(f"Could not find module {modulename}")
            self.error = 1
            return None
elif moduletype in ('targets','flows', 'pdks', 'libs'):
fullpath = self._find_sc_file(f"{moduletype}/{modulename}.py", missing_ok=True)
elif moduletype in ('tools', 'checklists'):
fullpath = self._find_sc_file(f"{moduletype}/{modulename}/{modulename}.py", missing_ok=True)
else:
self.logger.error(f"Illegal module type '{moduletype}'.")
self.error = 1
return
# try loading module if found
if fullpath:
self.logger.debug(f"Loading function '{funcname}' from module '{modulename}'")
try:
spec = importlib.util.spec_from_file_location(modulename, fullpath)
imported = importlib.util.module_from_spec(spec)
spec.loader.exec_module(imported)
if hasattr(imported, funcname):
function = getattr(imported, funcname)
else:
function = None
return function
except:
traceback.print_exc()
self.logger.error(f"Module setup failed for '{modulename}'")
self.error = 1
##########################################################################
def load_target(self, name):
"""
Loads a target module and runs the setup() function.
The function searches the $SCPATH for targets/<name>.py and runs
the setup function in that module if found.
Args:
name (str): Module name
Examples:
>>> chip.load_target('freepdk45_demo')
Loads the 'freepdk45_demo' target
"""
self.set('option', 'target', name)
func = self.find_function(name, 'setup', 'targets')
if func is not None:
func(self)
else:
self.logger.error(f'Target module {name} not found in $SCPATH or siliconcompiler/targets/.')
            raise SiliconCompilerError(f'Target module {name} not found in $SCPATH or siliconcompiler/targets/.')
##########################################################################
def load_pdk(self, name):
"""
Loads a PDK module and runs the setup() function.
The function searches the $SCPATH for pdks/<name>.py and runs
the setup function in that module if found.
Args:
name (str): Module name
Examples:
>>> chip.load_pdk('freepdk45_pdk')
Loads the 'freepdk45' pdk
"""
func = self.find_function(name, 'setup', 'pdks')
if func is not None:
self.logger.info(f"Loading PDK '{name}'")
self._loaded_modules['pdks'].append(name)
func(self)
else:
self.logger.error(f'PDK module {name} not found in $SCPATH or siliconcompiler/pdks/.')
raise SiliconCompilerError(f'PDK module {name} not found in $SCPATH or siliconcompiler/pdks/.')
##########################################################################
def load_flow(self, name):
"""
Loads a flow module and runs the setup() function.
The function searches the $SCPATH for flows/<name>.py and runs
the setup function in that module if found.
Args:
name (str): Module name
Examples:
>>> chip.load_flow('asicflow')
Loads the 'asicflow' flow
"""
func = self.find_function(name, 'setup', 'flows')
if func is not None:
self.logger.info(f"Loading flow '{name}'")
self._loaded_modules['flows'].append(name)
func(self)
else:
self.logger.error(f'Flow module {name} not found in $SCPATH or siliconcompiler/flows/.')
raise SiliconCompilerError(f'Flow module {name} not found in $SCPATH or siliconcompiler/flows/.')
##########################################################################
def load_lib(self, name):
"""
Loads a library module and runs the setup() function.
The function searches the $SCPATH for libs/<name>.py and runs
the setup function in that module if found.
Args:
name (str): Module name
Examples:
>>> chip.load_lib('nangate45')
Loads the 'nangate45' library
"""
func = self.find_function(name, 'setup', 'libs')
if func is not None:
self.logger.info(f"Loading library '{name}'")
self._loaded_modules['libs'].append(name)
func(self)
else:
self.logger.error(f'Library module {name} not found in $SCPATH or siliconcompiler/libs/.')
raise SiliconCompilerError(f'Library module {name} not found in $SCPATH or siliconcompiler/libs/.')
##########################################################################
def load_checklist(self, name):
"""
Loads a checklist module and runs the setup() function.
The function searches the $SCPATH for checklist/<name>/<name>.py and runs
the setup function in that module if found.
Args:
name (str): Module name
Examples:
>>> chip.load_checklist('oh_tapeout')
Loads the 'oh_tapeout' checklist
"""
func = self.find_function(name, 'setup', 'checklists')
if func is not None:
self.logger.info(f"Loading checklist '{name}'")
self._loaded_modules['checklists'].append(name)
func(self)
else:
self.logger.error(f'Checklist module {name} not found in $SCPATH or siliconcompiler/checklists/.')
raise SiliconCompilerError(f'Checklist module {name} not found in $SCPATH or siliconcompiler/checklists/.')
###########################################################################
def list_metrics(self):
'''
Returns a list of all metrics in the schema.
'''
return self.getkeys('metric','default','default')
###########################################################################
def help(self, *keypath):
"""
Returns a schema parameter description.
Args:
*keypath(str): Keypath to parameter.
Returns:
A formatted multi-line help paragraph for the parameter provided.
Examples:
>>> print(chip.help('asic','diearea'))
Displays help information about the 'asic, diearea' parameter
"""
self.logger.debug('Fetching help for %s', keypath)
#Fetch Values
description = self.get(*keypath, field='shorthelp')
typestr = self.get(*keypath, field='type')
switchstr = str(self.get(*keypath, field='switch'))
defstr = str(self.get(*keypath, field='defvalue'))
requirement = str(self.get(*keypath, field='require'))
helpstr = self.get(*keypath, field='help')
example = self.get(*keypath, field='example')
        #Removing multiple spaces and newlines
        helpstr = helpstr.rstrip()
        helpstr = helpstr.replace("\n", "")
        helpstr = ' '.join(helpstr.split())
        for idx, item in enumerate(example):
            example[idx] = ' '.join(item.split())
            example[idx] = example[idx].replace(", ", ",")
        # Build the example string after the cleanup above so the normalized
        # examples are what actually get printed.
        examplestr = ("\nExamples: " + example[0] + ''.join(
                      ["\n          " + ex for ex in example[1:]]))
#Wrap text
para = textwrap.TextWrapper(width=60)
para_list = para.wrap(text=helpstr)
#Full Doc String
fullstr = ("-"*80 +
"\nDescription: " + description +
"\nSwitch: " + switchstr +
"\nType: " + typestr +
"\nRequirement: " + requirement +
"\nDefault: " + defstr +
examplestr +
"\nHelp: " + para_list[0] + "\n")
for line in para_list[1:]:
fullstr = (fullstr +
" "*13 + line.lstrip() + "\n")
return fullstr
###########################################################################
def valid(self, *args, valid_keypaths=None, quiet=True, default_valid=False):
"""
Checks validity of a keypath.
Checks the validity of a parameter keypath and returns True if the
keypath is valid and False if invalid.
Args:
keypath(list str): Variable length schema key list.
valid_keypaths (list of list): List of valid keypaths as lists. If
None, check against all keypaths in the schema.
quiet (bool): If True, don't display warnings for invalid keypaths.
Returns:
Boolean indicating validity of keypath.
Examples:
>>> check = chip.valid('design')
Returns True.
>>> check = chip.valid('blah')
Returns False.
"""
keypathstr = ','.join(args)
keylist = list(args)
if default_valid:
default = 'default'
else:
default = None
if valid_keypaths is None:
valid_keypaths = self.getkeys()
# Look for a full match with default playing wild card
for valid_keypath in valid_keypaths:
if len(keylist) != len(valid_keypath):
continue
ok = True
for i in range(len(keylist)):
if valid_keypath[i] not in (keylist[i], default):
ok = False
break
if ok:
return True
# Match not found
if not quiet:
self.logger.warning(f"Keypath [{keypathstr}] is not valid")
return False
###########################################################################
def get(self, *keypath, field='value', job=None, cfg=None):
"""
Returns a schema parameter field.
Returns a schema parameter field based on the keypath provided in the
``*keypath``. See the :ref:`Schema Reference Manual<SiliconCompiler
Schema>` for documentation of all supported keypaths. The returned type
is consistent with the type field of the parameter. Fetching parameters
        with empty or undefined value fields returns None for scalar types and []
(empty list) for list types. Accessing a non-existent keypath produces
a logger error message and raises the Chip object error flag.
Args:
keypath(list str): Variable length schema key list.
field(str): Parameter field to fetch.
job (str): Jobname to use for dictionary access in place of the
current active jobname.
cfg(dict): Alternate dictionary to access in place of the default
chip object schema dictionary.
Returns:
Value found for the keypath and field provided.
Examples:
>>> foundry = chip.get('pdk', 'foundry')
Returns the name of the foundry from the PDK.
"""
if cfg is None:
if job is not None:
cfg = self.cfg['history'][job]
else:
cfg = self.cfg
keypathstr = ','.join(keypath)
self.logger.debug(f"Reading from [{keypathstr}]. Field = '{field}'")
return self._search(cfg, keypathstr, *keypath, field=field, mode='get')
###########################################################################
def getkeys(self, *keypath, cfg=None, job=None):
"""
Returns a list of schema dictionary keys.
Searches the schema for the keypath provided and returns a list of
keys found, excluding the generic 'default' key. Accessing a
non-existent keypath produces a logger error message and raises the
Chip object error flag.
Args:
keypath (list str): Variable length ordered schema key list
cfg (dict): Alternate dictionary to access in place of self.cfg
job (str): Jobname to use for dictionary access in place of the
current active jobname.
Returns:
List of keys found for the keypath provided.
Examples:
>>> keylist = chip.getkeys('pdk')
Returns all keys for the 'pdk' keypath.
>>> keylist = chip.getkeys()
Returns all list of all keypaths in the schema.
"""
if cfg is None:
if job is None:
cfg = self.cfg
else:
cfg = self.cfg['history'][job]
if len(list(keypath)) > 0:
keypathstr = ','.join(keypath)
self.logger.debug('Getting schema parameter keys for: %s', keypathstr)
keys = list(self._search(cfg, keypathstr, *keypath, mode='getkeys'))
if 'default' in keys:
keys.remove('default')
else:
self.logger.debug('Getting all schema parameter keys.')
keys = list(self._allkeys(cfg))
return keys
###########################################################################
def getdict(self, *keypath, cfg=None):
"""
Returns a schema dictionary.
Searches the schema for the keypath provided and returns a complete
dictionary. Accessing a non-existent keypath produces a logger error
message and raises the Chip object error flag.
Args:
keypath(list str): Variable length ordered schema key list
cfg(dict): Alternate dictionary to access in place of self.cfg
Returns:
A schema dictionary
Examples:
>>> pdk = chip.getdict('pdk')
Returns the complete dictionary found for the keypath 'pdk'
"""
if cfg is None:
cfg = self.cfg
if len(list(keypath)) > 0:
keypathstr = ','.join(keypath)
self.logger.debug('Getting cfg for: %s', keypathstr)
localcfg = self._search(cfg, keypathstr, *keypath, mode='getcfg')
return copy.deepcopy(localcfg)
###########################################################################
def set(self, *args, field='value', clobber=True, cfg=None):
'''
Sets a schema parameter field.
Sets a schema parameter field based on the keypath and value provided in
the ``*args``. See the :ref:`Schema Reference Manual<SiliconCompiler
Schema>` for documentation of all supported keypaths. New schema
dictionaries are automatically created for keypaths that overlap with
'default' dictionaries. The write action is ignored if the parameter
value is non-empty and the clobber option is set to False.
The value provided must agree with the dictionary parameter 'type'.
Accessing a non-existent keypath or providing a value that disagrees
with the parameter type produces a logger error message and raises the
Chip object error flag.
Args:
args (list): Parameter keypath followed by a value to set.
field (str): Parameter field to set.
clobber (bool): Existing value is overwritten if True.
cfg(dict): Alternate dictionary to access in place of self.cfg
Examples:
>>> chip.set('design', 'top')
Sets the name of the design to 'top'
'''
if cfg is None:
cfg = self.cfg
# Verify that all keys are strings
for key in args[:-1]:
if not isinstance(key,str):
self.logger.error(f"Key [{key}] is not a string [{args}]")
keypathstr = ','.join(args[:-1])
all_args = list(args)
# Special case to ensure loglevel is updated ASAP
if len(args) == 3 and args[1] == 'loglevel' and field == 'value':
self.logger.setLevel(args[2])
self.logger.debug(f"Setting [{keypathstr}] to {args[-1]}")
return self._search(cfg, keypathstr, *all_args, field=field, mode='set', clobber=clobber)
###########################################################################
def add(self, *args, cfg=None, field='value'):
'''
Adds item(s) to a schema parameter list.
Adds item(s) to schema parameter list based on the keypath and value
provided in the ``*args``. See the :ref:`Schema Reference
Manual<SiliconCompiler Schema>` for documentation of all supported
keypaths. New schema dictionaries are automatically created for keypaths
that overlap with 'default' dictionaries.
The value provided must agree with the dictionary parameter 'type'.
Accessing a non-existent keypath, providing a value that disagrees
with the parameter type, or using add with a scalar parameter produces
a logger error message and raises the Chip object error flag.
Args:
args (list): Parameter keypath followed by a value to add.
cfg(dict): Alternate dictionary to access in place of self.cfg
field (str): Parameter field to set.
Examples:
>>> chip.add('source', 'hello.v')
Adds the file 'hello.v' to the list of sources.
'''
if cfg is None:
cfg = self.cfg
# Verify that all keys are strings
for key in args[:-1]:
if not isinstance(key,str):
self.logger.error(f"Key [{key}] is not a string [{args}]")
keypathstr = ','.join(args[:-1])
all_args = list(args)
self.logger.debug(f'Appending value {args[-1]} to [{keypathstr}]')
return self._search(cfg, keypathstr, *all_args, field=field, mode='add')
###########################################################################
def _allkeys(self, cfg, keys=None, keylist=None):
'''
Returns list of all keypaths in the schema.
'''
if keys is None:
keylist = []
keys = []
for k in cfg:
newkeys = keys.copy()
newkeys.append(k)
if 'defvalue' in cfg[k]:
keylist.append(newkeys)
else:
self._allkeys(cfg[k], keys=newkeys, keylist=keylist)
return keylist
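    # Illustrative note (not from the original source): _allkeys() returns
    # keypaths as lists of keys, e.g. a shape like
    # [['design'], ['option', 'loglevel'], ...], where each keypath terminates
    # at a leaf dictionary containing a 'defvalue' field.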
###########################################################################
def _search(self, cfg, keypath, *args, field='value', mode='get', clobber=True):
'''
Internal recursive function that searches the Chip schema for a
match to the combination of *args and fields supplied. The function is
used to set and get data within the dictionary.
Args:
cfg(dict): The cfg schema to search
keypath (str): Concatenated keypath used for error logging.
args (str): Keypath/value variable list used for access
field(str): Leaf cell field to access.
            mode(str): Action (set/get/add/getkeys/getcfg)
clobber(bool): Specifies to clobber (for set action)
'''
all_args = list(args)
param = all_args[0]
val = all_args[-1]
empty = [None, 'null', [], 'false']
#set/add leaf cell (all_args=(param,val))
if (mode in ('set', 'add')) & (len(all_args) == 2):
# clean error if key not found
if (not param in cfg) & (not 'default' in cfg):
self.logger.error(f"Set/Add keypath [{keypath}] does not exist.")
self.error = 1
else:
# making an 'instance' of default if not found
if (not param in cfg) & ('default' in cfg):
cfg[param] = copy.deepcopy(cfg['default'])
list_type =bool(re.match(r'\[', cfg[param]['type']))
# checking for illegal fields
if not field in cfg[param] and (field != 'value'):
self.logger.error(f"Field '{field}' for keypath [{keypath}]' is not a valid field.")
self.error = 1
# check legality of value
if field == 'value':
(type_ok,type_error) = self._typecheck(cfg[param], param, val)
if not type_ok:
self.logger.error("%s", type_error)
self.error = 1
# converting python True/False to lower case string
if (field == 'value') and (cfg[param]['type'] == 'bool'):
if val == True:
val = "true"
elif val == False:
val = "false"
# checking if value has been set
# TODO: fix clobber!!
selval = cfg[param]['value']
# updating values
if cfg[param]['lock'] == "true":
self.logger.debug("Ignoring {mode}{} to [{keypath}]. Lock bit is set.")
elif (mode == 'set'):
if (field != 'value') or (selval in empty) or clobber:
if field in ('copy', 'lock'):
# boolean fields
if val is True:
cfg[param][field] = "true"
elif val is False:
cfg[param][field] = "false"
else:
self.logger.error(f'{field} must be set to boolean.')
self.error = 1
elif field in ('hashalgo', 'scope', 'require', 'type', 'unit',
'shorthelp', 'notes', 'switch', 'help'):
                            # always string scalars
cfg[param][field] = val
                        elif field == 'example':
# list from default schema (already a list)
cfg[param][field] = val
elif field in ('signature', 'filehash', 'date', 'author'):
# convert to list if appropriate
if isinstance(val, list) | (not list_type):
cfg[param][field] = val
else:
cfg[param][field] = [val]
elif (not list_type) & (val is None):
# special case for None
cfg[param][field] = None
elif (not list_type) & (not isinstance(val, list)):
# convert to string for scalar value
cfg[param][field] = str(val)
elif list_type & (not isinstance(val, list)):
# convert to string for list value
cfg[param][field] = [str(val)]
elif list_type & isinstance(val, list):
# converting tuples to strings
if re.search(r'\(', cfg[param]['type']):
cfg[param][field] = list(map(str,val))
else:
cfg[param][field] = val
else:
self.logger.error(f"Assigning list to scalar for [{keypath}]")
self.error = 1
else:
self.logger.debug(f"Ignoring set() to [{keypath}], value already set. Use clobber=true to override.")
elif (mode == 'add'):
if field in ('filehash', 'date', 'author', 'signature'):
cfg[param][field].append(str(val))
elif field in ('copy', 'lock'):
self.logger.error(f"Illegal use of add() for scalar field {field}.")
self.error = 1
elif list_type & (not isinstance(val, list)):
cfg[param][field].append(str(val))
elif list_type & isinstance(val, list):
cfg[param][field].extend(val)
else:
self.logger.error(f"Illegal use of add() for scalar parameter [{keypath}].")
self.error = 1
return cfg[param][field]
#get leaf cell (all_args=param)
elif len(all_args) == 1:
if not param in cfg:
self.error = 1
self.logger.error(f"Get keypath [{keypath}] does not exist.")
elif mode == 'getcfg':
return cfg[param]
elif mode == 'getkeys':
return cfg[param].keys()
else:
if not (field in cfg[param]) and (field!='value'):
self.error = 1
self.logger.error(f"Field '{field}' not found for keypath [{keypath}]")
elif field == 'value':
#Select default if no value has been set
if field not in cfg[param]:
selval = cfg[param]['defvalue']
else:
selval = cfg[param]['value']
#check for list
if bool(re.match(r'\[', cfg[param]['type'])):
sctype = re.sub(r'[\[\]]', '', cfg[param]['type'])
return_list = []
if selval is None:
return None
for item in selval:
if sctype == 'int':
return_list.append(int(item))
elif sctype == 'float':
return_list.append(float(item))
elif sctype.startswith('(str,'):
if isinstance(item,tuple):
return_list.append(item)
else:
tuplestr = re.sub(r'[\(\)\'\s]','',item)
return_list.append(tuple(tuplestr.split(',')))
elif sctype.startswith('(float,'):
if isinstance(item,tuple):
return_list.append(item)
else:
tuplestr = re.sub(r'[\(\)\s]','',item)
return_list.append(tuple(map(float, tuplestr.split(','))))
else:
return_list.append(item)
return return_list
else:
if selval is None:
# Unset scalar of any type
scalar = None
elif cfg[param]['type'] == "int":
#print(selval, type(selval))
scalar = int(float(selval))
elif cfg[param]['type'] == "float":
scalar = float(selval)
elif cfg[param]['type'] == "bool":
scalar = (selval == 'true')
elif re.match(r'\(', cfg[param]['type']):
tuplestr = re.sub(r'[\(\)\s]','',selval)
scalar = tuple(map(float, tuplestr.split(',')))
else:
scalar = selval
return scalar
#all non-value fields are strings (or lists of strings)
else:
if cfg[param][field] == 'true':
return True
elif cfg[param][field] == 'false':
return False
else:
return cfg[param][field]
#if not leaf cell descend tree
else:
##copying in default tree for dynamic trees
if not param in cfg and 'default' in cfg:
cfg[param] = copy.deepcopy(cfg['default'])
elif not param in cfg:
self.error = 1
self.logger.error(f"Get keypath [{keypath}] does not exist.")
return None
all_args.pop(0)
return self._search(cfg[param], keypath, *all_args, field=field, mode=mode, clobber=clobber)
###########################################################################
def _prune(self, cfg, top=True, keeplists=False):
'''
Internal recursive function that creates a local copy of the Chip
schema (cfg) with only essential non-empty parameters retained.
'''
# create a local copy of dict
if top:
localcfg = copy.deepcopy(cfg)
else:
localcfg = cfg
#10 should be enough for anyone...
maxdepth = 10
i = 0
#Prune when the default & value are set to the following
if keeplists:
empty = ("null", None)
else:
empty = ("null", None, [])
# When at top of tree loop maxdepth times to make sure all stale
# branches have been removed, not elegant, but stupid-simple
# "good enough"
while i < maxdepth:
#Loop through all keys starting at the top
for k in list(localcfg.keys()):
#removing all default/template keys
# reached a default subgraph, delete it
if k == 'default':
del localcfg[k]
# reached leaf-cell
elif 'help' in localcfg[k].keys():
del localcfg[k]['help']
elif 'example' in localcfg[k].keys():
del localcfg[k]['example']
elif 'defvalue' in localcfg[k].keys():
if localcfg[k]['defvalue'] in empty:
if 'value' in localcfg[k].keys():
if localcfg[k]['value'] in empty:
del localcfg[k]
else:
del localcfg[k]
#removing stale branches
elif not localcfg[k]:
localcfg.pop(k)
#keep traversing tree
else:
self._prune(cfg=localcfg[k], top=False, keeplists=keeplists)
if top:
i += 1
else:
break
return localcfg
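    # Illustrative note (not from the original source): _prune() is assumed to
    # be used when serializing the manifest, so that a freshly created chip
    # keeps set parameters such as ['design'] while untouched leaves with
    # empty defaults are dropped from the written output.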
###########################################################################
def _find_sc_file(self, filename, missing_ok=False):
"""
Returns the absolute path for the filename provided.
Searches the SC root directory and the 'scpath' parameter for the
filename provided and returns the absolute path. If no valid absolute
path is found during the search, None is returned.
        Shell variables ('$' followed by strings consisting of letters,
        digits, and underscores) are replaced with the variable value.
Args:
filename (str): Relative or absolute filename.
Returns:
Returns absolute path of 'filename' if found, otherwise returns
None.
Examples:
>>> chip._find_sc_file('flows/asicflow.py')
Returns the absolute path based on the sc installation directory.
"""
# Replacing environment variables
filename = self._resolve_env_vars(filename)
# If we have a path relative to our cwd or an abs path, pass-through here
if os.path.exists(os.path.abspath(filename)):
return os.path.abspath(filename)
# Otherwise, search relative to scpaths
scpaths = [self.scroot, self.cwd]
scpaths.extend(self.get('option', 'scpath'))
if 'SCPATH' in os.environ:
scpaths.extend(os.environ['SCPATH'].split(os.pathsep))
searchdirs = ', '.join(scpaths)
self.logger.debug(f"Searching for file {filename} in {searchdirs}")
result = None
for searchdir in scpaths:
if not os.path.isabs(searchdir):
searchdir = os.path.join(self.cwd, searchdir)
abspath = os.path.abspath(os.path.join(searchdir, filename))
if os.path.exists(abspath):
result = abspath
break
if result is None and not missing_ok:
self.error = 1
self.logger.error(f"File {filename} was not found")
return result
###########################################################################
def find_files(self, *keypath, cfg=None, missing_ok=False, job=None):
"""
Returns absolute paths to files or directories based on the keypath
provided.
By default, this function first checks if the keypath provided has its
`copy` parameter set to True. If so, it returns paths to the files in
the build directory. Otherwise, it resolves these files based on the
current working directory and SC path.
The keypath provided must point to a schema parameter of type file, dir,
or lists of either. Otherwise, it will trigger an error.
Args:
keypath (list str): Variable length schema key list.
cfg (dict): Alternate dictionary to access in place of the default
chip object schema dictionary.
missing_ok (bool): If True, silently return None when files aren't
found. If False, print an error and set the error flag.
job (str): Jobname to use for dictionary access in place of the
current active jobname.
Returns:
If keys points to a scalar entry, returns an absolute path to that
            file/directory, or None if not found. If keys points to a list
entry, returns a list of either the absolute paths or None for each
entry, depending on whether it is found.
Examples:
>>> chip.find_files('source')
Returns a list of absolute paths to source files, as specified in
the schema.
"""
if cfg is None:
cfg = self.cfg
copyall = self.get('option', 'copyall', cfg=cfg, job=job)
paramtype = self.get(*keypath, field='type', cfg=cfg, job=job)
if 'file' in paramtype:
copy = self.get(*keypath, field='copy', cfg=cfg, job=job)
else:
copy = False
if 'file' not in paramtype and 'dir' not in paramtype:
self.logger.error('Can only call find_files on file or dir types')
self.error = 1
return None
is_list = bool(re.match(r'\[', paramtype))
paths = self.get(*keypath, cfg=cfg, job=job)
# Convert to list if we have scalar
if not is_list:
paths = [paths]
result = []
# Special cases for various ['eda', ...] files that may be implicitly
# under the workdir (or refdir in the case of scripts).
# TODO: it may be cleaner to have a file resolution scope flag in schema
# (e.g. 'scpath', 'workdir', 'refdir'), rather than harcoding special
# cases.
if keypath[0] == 'tool' and keypath[2] in ('input', 'output', 'report'):
step = keypath[3]
index = keypath[4]
if keypath[2] == 'report':
io = ""
else:
io = keypath[2] + 's'
iodir = os.path.join(self._getworkdir(jobname=job, step=step, index=index), io)
for path in paths:
abspath = os.path.join(iodir, path)
if os.path.isfile(abspath):
result.append(abspath)
return result
elif keypath[0] == 'tool' and keypath[2] == 'script':
tool = keypath[1]
step = keypath[3]
index = keypath[4]
refdirs = self.find_files('tool', tool, 'refdir', step, index)
for path in paths:
for refdir in refdirs:
abspath = os.path.join(refdir, path)
if os.path.isfile(abspath):
result.append(abspath)
break
return result
for path in paths:
if (copyall or copy) and ('file' in paramtype):
name = self._get_imported_filename(path)
abspath = os.path.join(self._getworkdir(jobname=job, step='import'), 'outputs', name)
if os.path.isfile(abspath):
# if copy is True and file is found in import outputs,
# continue. Otherwise, fall through to _find_sc_file (the
# file may not have been gathered in imports yet)
result.append(abspath)
continue
result.append(self._find_sc_file(path, missing_ok=missing_ok))
# Convert back to scalar if that was original type
if not is_list:
return result[0]
return result
###########################################################################
def find_result(self, filetype, step, jobname=None, index='0'):
"""
Returns the absolute path of a compilation result.
Utility function that returns the absolute path to a results
file based on the provided arguments. The result directory
structure is:
<dir>/<design>/<jobname>/<step>/<index>/outputs/<design>.filetype
Args:
            filetype (str): File extension without the leading dot (vg, def, etc)
step (str): Task step name ('syn', 'place', etc)
jobname (str): Jobid directory name
index (str): Task index
Returns:
Returns absolute path to file.
Examples:
>>> manifest_filepath = chip.find_result('.vg', 'syn')
Returns the absolute path to the manifest.
"""
if jobname is None:
jobname = self.get('option', 'jobname')
workdir = self._getworkdir(jobname, step, index)
design = self.get('design')
filename = f"{workdir}/outputs/{design}.{filetype}"
self.logger.debug("Finding result %s", filename)
if os.path.isfile(filename):
return filename
else:
return None
###########################################################################
def _abspath(self, cfg):
'''
Internal function that goes through provided dictionary and resolves all
relative paths where required.
'''
for keypath in self.getkeys(cfg=cfg):
paramtype = self.get(*keypath, cfg=cfg, field='type')
value = self.get(*keypath, cfg=cfg)
if value:
#only do something if type is file or dir
if 'file' in paramtype or 'dir' in paramtype:
abspaths = self.find_files(*keypath, cfg=cfg, missing_ok=True)
self.set(*keypath, abspaths, cfg=cfg)
###########################################################################
def _print_csv(self, cfg, fout):
csvwriter = csv.writer(fout)
csvwriter.writerow(['Keypath', 'Value'])
allkeys = self.getkeys(cfg=cfg)
for key in allkeys:
keypath = ','.join(key)
value = self.get(*key, cfg=cfg)
if isinstance(value,list):
for item in value:
csvwriter.writerow([keypath, item])
else:
csvwriter.writerow([keypath, value])
###########################################################################
def _escape_val_tcl(self, val, typestr):
'''Recursive helper function for converting Python values to safe TCL
values, based on the SC type string.'''
if val is None:
return ''
elif typestr.startswith('('):
# Recurse into each item of tuple
subtypes = typestr.strip('()').split(',')
valstr = ' '.join(self._escape_val_tcl(v, subtype.strip())
for v, subtype in zip(val, subtypes))
return f'[list {valstr}]'
elif typestr.startswith('['):
# Recurse into each item of list
subtype = typestr.strip('[]')
valstr = ' '.join(self._escape_val_tcl(v, subtype) for v in val)
return f'[list {valstr}]'
elif typestr == 'bool':
return 'true' if val else 'false'
elif typestr == 'str':
# Escape string by surrounding it with "" and escaping the few
# special characters that still get considered inside "". We don't
# use {}, since this requires adding permanent backslashes to any
# curly braces inside the string.
# Source: https://www.tcl.tk/man/tcl8.4/TclCmd/Tcl.html (section [4] on)
            escaped_val = (val.replace('\\', '\\\\')  # escape '\' to avoid backslash substitution (do this first, since other replaces insert '\')
                              .replace('[', '\\[')    # escape '[' to avoid command substitution
                              .replace('$', '\\$')    # escape '$' to avoid variable substitution
                              .replace('"', '\\"'))   # escape '"' to avoid string terminating early
return '"' + escaped_val + '"'
elif typestr in ('file', 'dir'):
# Replace $VAR with $env(VAR) for tcl
val = re.sub(r'\$(\w+)', r'$env(\1)', val)
# Same escapes as applied to string, minus $ (since we want to resolve env vars).
            escaped_val = (val.replace('\\', '\\\\')  # escape '\' to avoid backslash substitution (do this first, since other replaces insert '\')
                              .replace('[', '\\[')    # escape '[' to avoid command substitution
                              .replace('"', '\\"'))   # escape '"' to avoid string terminating early
return '"' + escaped_val + '"'
else:
# floats/ints just become strings
return str(val)
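    # Illustrative note (not from the original source): with the escaping
    # rules above, a 'str' value such as 'riscv [rv32imc]' becomes
    # "riscv \[rv32imc]", and a '[str]' value ['top', 'cpu'] becomes
    # [list "top" "cpu"]. These examples are assumed, not taken from tests.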
###########################################################################
def _print_tcl(self, cfg, fout=None, prefix=""):
'''
Prints out schema as TCL dictionary
'''
fout.write("#############################################")
fout.write("#!!!! AUTO-GENERATED FILE. DO NOT EDIT!!!!!!")
fout.write("#############################################\n")
allkeys = self.getkeys(cfg=cfg)
for key in allkeys:
typestr = self.get(*key, cfg=cfg, field='type')
value = self.get(*key, cfg=cfg)
#create a TCL dict
keystr = ' '.join(key)
valstr = self._escape_val_tcl(value, typestr)
if not (typestr.startswith('[') or typestr.startswith('(')):
# treat scalars as lists as well
valstr = f'[list {valstr}]'
outstr = f"{prefix} {keystr} {valstr}\n"
#print out all non default values
if 'default' not in key:
fout.write(outstr)
###########################################################################
def merge_manifest(self, cfg, job=None, clobber=True, clear=True, check=False):
"""
Merges an external manifest with the current compilation manifest.
All value fields in the provided schema dictionary are merged into the
current chip object. Dictionaries with non-existent keypath produces a
logger error message and raises the Chip object error flag.
        Args:
            cfg (dict): Schema dictionary to merge into the current manifest
            job (str): Specifies non-default job to merge into
            clear (bool): If True, disables append operations for list type
            clobber (bool): If True, overwrites existing parameter value
            check (bool): If True, checks the validity of each key
        Examples:
            >>> chip.merge_manifest(other_chip.cfg)
            Merges all parameters from other_chip's schema into the Chip object
"""
self._merge_manifest(cfg, job, clobber, clear, check)
def _key_may_be_updated(self, keypath):
'''Helper that returns whether `keypath` can be updated mid-run.'''
# TODO: cleaner way to manage this?
if keypath[0] in ('metric', 'record'):
return True
if keypath[0] == 'flowgraph' and keypath[4] in ('select', 'status'):
return True
return False
###########################################################################
def _merge_manifest(self, cfg, job=None, clobber=True, clear=True, check=False, partial=False):
"""
Internal merge_manifest() implementation with `partial` arg.
partial (bool): If True, perform a partial merge, only merging keypaths
that may have been updated during run().
"""
if job is not None:
            # fill with default schema before populating
self.cfg['history'][job] = schema_cfg()
dst = self.cfg['history'][job]
else:
dst = self.cfg
for keylist in self.getkeys(cfg=cfg):
if partial and not self._key_may_be_updated(keylist):
continue
if keylist[0] in ('history', 'library'):
continue
#only read in valid keypaths without 'default'
key_valid = True
if check:
key_valid = self.valid(*keylist, quiet=False, default_valid=True)
if key_valid and 'default' not in keylist:
# update value, handling scalars vs. lists
typestr = self.get(*keylist, cfg=cfg, field='type')
val = self.get(*keylist, cfg=cfg)
arg = keylist.copy()
arg.append(val)
if bool(re.match(r'\[', typestr)) & bool(not clear):
self.add(*arg, cfg=dst)
else:
self.set(*arg, cfg=dst, clobber=clobber)
# update other fields that a user might modify
for field in self.getdict(*keylist, cfg=cfg).keys():
if field in ('value', 'switch', 'type', 'require', 'defvalue',
'shorthelp', 'example', 'help'):
# skip these fields (value handled above, others are static)
continue
v = self.get(*keylist, cfg=cfg, field=field)
self.set(*keylist, v, cfg=dst, field=field)
###########################################################################
def _keypath_empty(self, key):
'''
Utility function to check key for an empty list.
'''
emptylist = ("null", None, [])
value = self.get(*key)
defvalue = self.get(*key, field='defvalue')
value_empty = (defvalue in emptylist) and (value in emptylist)
return value_empty
###########################################################################
def _check_files(self):
allowed_paths = [os.path.join(self.cwd, self.get('option', 'builddir'))]
allowed_paths.extend(os.environ['SC_VALID_PATHS'].split(os.pathsep))
for keypath in self.getkeys():
if 'default' in keypath:
continue
paramtype = self.get(*keypath, field='type')
#only do something if type is file or dir
if ('history' not in keypath and 'library' not in keypath) and ('file' in paramtype or 'dir' in paramtype):
if self.get(*keypath) is None:
# skip unset values (some directories are None by default)
continue
abspaths = self.find_files(*keypath, missing_ok=True)
if not isinstance(abspaths, list):
abspaths = [abspaths]
for abspath in abspaths:
ok = False
if abspath is not None:
for allowed_path in allowed_paths:
if os.path.commonpath([abspath, allowed_path]) == allowed_path:
ok = True
continue
if not ok:
self.logger.error(f'Keypath {keypath} contains path(s) '
'that do not exist or resolve to files outside of '
'allowed directories.')
return False
return True
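    # Illustrative note (not from the original source): _check_files() only
    # has an effect when SC_VALID_PATHS is set in the environment, e.g.
    #   SC_VALID_PATHS=/shared/pdks:/shared/ip
    # which limits resolved file paths to the build directory plus those trees.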
###########################################################################
def check_filepaths(self):
'''
Verifies that paths to all files in manifest are valid.
'''
allkeys = self.getkeys()
for keypath in allkeys:
allpaths = []
paramtype = self.get(*keypath, field='type')
if 'file' in paramtype or 'dir' in paramtype:
if 'dir' not in keypath and self.get(*keypath):
allpaths = list(self.get(*keypath))
for path in allpaths:
#check for env var
m = re.match(r'\$(\w+)(.*)', path)
if m:
prefix_path = os.environ[m.group(1)]
path = prefix_path + m.group(2)
file_error = 'file' in paramtype and not os.path.isfile(path)
dir_error = 'dir' in paramtype and not os.path.isdir(path)
if file_error or dir_error:
self.logger.error(f"Paramater {keypath} path {path} is invalid")
self.error = 1
###########################################################################
def _check_manifest_dynamic(self, step, index):
'''Runtime checks called from _runtask().
- Make sure expected inputs exist.
- Make sure all required filepaths resolve correctly.
'''
flow = self.get('option', 'flow')
tool = self.get('flowgraph', flow, step, index, 'tool')
if self.valid('tool', tool, 'input', step, index):
required_inputs = self.get('tool', tool, 'input', step, index)
else:
required_inputs = []
input_dir = os.path.join(self._getworkdir(step=step, index=index), 'inputs')
for filename in required_inputs:
path = os.path.join(input_dir, filename)
if not os.path.isfile(path):
self.logger.error(f'Required input {filename} not received for {step}{index}.')
self.error = 1
if (not tool in self.builtin) and self.valid('tool', tool, 'require', step, index):
all_required = self.get('tool', tool, 'require', step, index)
for item in all_required:
keypath = item.split(',')
paramtype = self.get(*keypath, field='type')
if ('file' in paramtype) or ('dir' in paramtype):
abspath = self.find_files(*keypath)
if abspath is None or (isinstance(abspath, list) and None in abspath):
self.logger.error(f"Required file keypath {keypath} can't be resolved.")
self.error = 1
# Need to run this check here since file resolution can change in
# _runtask().
if 'SC_VALID_PATHS' in os.environ:
if not self._check_files():
self.error = 1
return self.error
###########################################################################
def check_manifest(self):
'''
Verifies the integrity of the pre-run compilation manifest.
Checks the validity of the current schema manifest in
memory to ensure that the design has been properly set up prior
to running compilation. The function is called inside the run()
function but can also be called separately. Checks performed by the
check_manifest() function include:
* Has a flowgraph been defined?
* Does the manifest satisfy the schema requirement field settings?
* Are all flowgraph input names legal step/index pairs?
* Are the tool parameter setting requirements met?
Returns:
Returns True if the manifest is valid, else returns False.
Examples:
>>> manifest_ok = chip.check_manifest()
            Returns True if the Chip object dictionary checks out.
'''
# Dynamic checks
# We only perform these if arg, step and arg, index are set.
# We don't check inputs for skip all
# TODO: Need to add skip step
cur_step = self.get('arg', 'step')
cur_index = self.get('arg', 'index')
if cur_step and cur_index and not self.get('option', 'skipall'):
return self._check_manifest_dynamic(cur_step, cur_index)
design = self.get('design')
flow = self.get('option', 'flow')
jobname = self.get('option', 'jobname')
steplist = self.get('option', 'steplist')
if not steplist:
steplist = self.list_steps()
#1. Checking that flowgraph and steplist are legal
if flow not in self.getkeys('flowgraph'):
self.error = 1
self.logger.error(f"flowgraph {flow} not defined.")
legal_steps = self.getkeys('flowgraph',flow)
if 'import' not in legal_steps:
self.error = 1
self.logger.error("Flowgraph doesn't contain import step.")
indexlist = {}
#TODO: refactor
for step in steplist:
if self.get('option', 'indexlist'):
indexlist[step] = self.get('option', 'indexlist')
else:
indexlist[step] = self.getkeys('flowgraph', flow, step)
for step in steplist:
for index in indexlist[step]:
in_job = None
if (step in self.getkeys('option', 'jobinput') and
index in self.getkeys('option', 'jobinput', step)):
in_job = self.get('option', 'jobinput', step, index)
for in_step, in_index in self.get('flowgraph', flow, step, index, 'input'):
if in_job is not None:
workdir = self._getworkdir(jobname=in_job, step=in_step, index=in_index)
cfg = os.path.join(workdir, 'outputs', f'{design}.pkg.json')
if not os.path.isfile(cfg):
self.logger.error(f'{step}{index} relies on {in_step}{in_index} from job {in_job}, '
'but this task has not been run.')
self.error = 1
continue
if in_step in steplist and in_index in indexlist[in_step]:
# we're gonna run this step, OK
continue
if self.get('flowgraph', flow, in_step, in_index, 'status') == TaskStatus.SUCCESS:
# this task has already completed successfully, OK
continue
self.logger.error(f'{step}{index} relies on {in_step}{in_index}, '
'but this task has not been run and is not in the current steplist.')
self.error = 1
        #2. Check library names
for item in self.get('asic', 'logiclib'):
if item not in self.getkeys('library'):
self.error = 1
self.logger.error(f"Target library {item} not found.")
#3. Check requirements list
allkeys = self.getkeys()
for key in allkeys:
keypath = ",".join(key)
if 'default' not in key and 'history' not in key and 'library' not in key:
key_empty = self._keypath_empty(key)
requirement = self.get(*key, field='require')
if key_empty and (str(requirement) == 'all'):
self.error = 1
self.logger.error(f"Global requirement missing for [{keypath}].")
elif key_empty and (str(requirement) == self.get('option', 'mode')):
self.error = 1
self.logger.error(f"Mode requirement missing for [{keypath}].")
#4. Check per tool parameter requirements (when tool exists)
for step in steplist:
for index in self.getkeys('flowgraph', flow, step):
tool = self.get('flowgraph', flow, step, index, 'tool')
if (tool not in self.builtin) and (tool in self.getkeys('tool')):
# checking that requirements are set
if self.valid('tool', tool, 'require', step, index):
all_required = self.get('tool', tool, 'require', step, index)
for item in all_required:
keypath = item.split(',')
if self._keypath_empty(keypath):
self.error = 1
self.logger.error(f"Value empty for [{keypath}] for {tool}.")
if self._keypath_empty(['tool', tool, 'exe']):
self.error = 1
self.logger.error(f'Executable not specified for tool {tool}')
if 'SC_VALID_PATHS' in os.environ:
if not self._check_files():
self.error = 1
if not self._check_flowgraph_io():
self.error = 1
return self.error
###########################################################################
def _gather_outputs(self, step, index):
'''Return set of filenames that are guaranteed to be in outputs
directory after a successful run of step/index.'''
flow = self.get('option', 'flow')
tool = self.get('flowgraph', flow, step, index, 'tool')
outputs = set()
if tool in self.builtin:
in_tasks = self.get('flowgraph', flow, step, index, 'input')
in_task_outputs = [self._gather_outputs(*task) for task in in_tasks]
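            # minimum/maximum forward a single input task, so only outputs common
            # to every input are guaranteed; join/nop pass the union through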
if tool in ('minimum', 'maximum'):
if len(in_task_outputs) > 0:
outputs = in_task_outputs[0].intersection(*in_task_outputs[1:])
elif tool in ('join', 'nop'):
if len(in_task_outputs) > 0:
outputs = in_task_outputs[0].union(*in_task_outputs[1:])
else:
# TODO: logic should be added here when mux/verify builtins are implemented.
self.logger.error(f'Builtin {tool} not yet implemented')
else:
# Not builtin tool
if self.valid('tool', tool, 'output', step, index):
outputs = set(self.get('tool', tool, 'output', step, index))
else:
outputs = set()
if step == 'import':
imports = {self._get_imported_filename(p) for p in self._collect_paths()}
outputs.update(imports)
return outputs
###########################################################################
def _check_flowgraph_io(self):
'''Check if flowgraph is valid in terms of input and output files.
Returns True if valid, False otherwise.
'''
flow = self.get('option', 'flow')
steplist = self.get('option', 'steplist')
if not steplist:
steplist = self.list_steps()
if len(steplist) < 2:
return True
for step in steplist:
for index in self.getkeys('flowgraph', flow, step):
# For each task, check input requirements.
tool = self.get('flowgraph', flow, step, index, 'tool')
if tool in self.builtin:
# We can skip builtins since they don't have any particular
# input requirements -- they just pass through what they
# receive.
continue
# Get files we receive from input tasks.
in_tasks = self.get('flowgraph', flow, step, index, 'input')
if len(in_tasks) > 1:
self.logger.error(f'Tool task {step}{index} has more than one input task.')
elif len(in_tasks) > 0:
in_step, in_index = in_tasks[0]
if in_step not in steplist:
# If we're not running the input step, the required
# inputs need to already be copied into the build
# directory.
jobname = self.get('option', 'jobname')
if self.valid('option', 'jobinput', step, index):
in_job = self.get('option', 'jobinput', step, index)
else:
in_job = jobname
workdir = self._getworkdir(jobname=in_job, step=in_step, index=in_index)
in_step_out_dir = os.path.join(workdir, 'outputs')
inputs = set(os.listdir(in_step_out_dir))
else:
inputs = self._gather_outputs(in_step, in_index)
else:
inputs = set()
if self.valid('tool', tool, 'input', step, index):
requirements = self.get('tool', tool, 'input', step, index)
else:
requirements = []
for requirement in requirements:
if requirement not in inputs:
self.logger.error(f'Invalid flow: {step}{index} will '
f'not receive required input {requirement}.')
return False
return True
###########################################################################
def read_manifest(self, filename, job=None, clear=True, clobber=True):
"""
Reads a manifest from disk and merges it with the current compilation manifest.
The file format read is determined by the filename suffix. Currently
        json (*.json) and yaml (*.yaml) formats are supported.
Args:
filename (filepath): Path to a manifest file to be loaded.
job (str): Specifies non-default job to merge into.
clear (bool): If True, disables append operations for list type.
clobber (bool): If True, overwrites existing parameter value.
Examples:
>>> chip.read_manifest('mychip.json')
Loads the file mychip.json into the current Chip object.
"""
self._read_manifest(filename, job=job, clear=clear, clobber=clobber)
###########################################################################
def _read_manifest(self, filename, job=None, clear=True, clobber=True, partial=False):
"""
Internal read_manifest() implementation with `partial` arg.
partial (bool): If True, perform a partial merge, only merging keypaths
that may have been updated during run().
"""
filepath = os.path.abspath(filename)
self.logger.debug(f"Reading manifest {filepath}")
if not os.path.isfile(filepath):
error_message = f"Manifest file not found {filepath}"
self.logger.error(error_message)
raise SiliconCompilerError(error_message)
#Read arguments from file based on file type
if filepath.endswith('.gz'):
fin = gzip.open(filepath, 'r')
else:
fin = open(filepath, 'r')
try:
if re.search(r'(\.json|\.sup)(\.gz)*$', filepath):
localcfg = json.load(fin)
elif re.search(r'(\.yaml|\.yml)(\.gz)*$', filepath):
localcfg = yaml.load(fin, Loader=yaml.SafeLoader)
else:
self.logger.error('File format not recognized %s', filepath)
self.error = 1
finally:
fin.close()
if self.get('schemaversion') != localcfg['schemaversion']['value']:
self.logger.warning('Attempting to read manifest with incompatible '
'schema version into current chip object. Skipping...')
return
# Merging arguments with the Chip configuration
self._merge_manifest(localcfg, job=job, clear=clear, clobber=clobber, partial=partial)
# Read history
if 'history' in localcfg and not partial:
for historic_job in localcfg['history'].keys():
self._merge_manifest(localcfg['history'][historic_job],
job=historic_job,
clear=clear,
clobber=clobber,
partial=False)
if 'library' in localcfg and not partial:
for libname in localcfg['library'].keys():
if libname in self.cfg['library']:
# TODO: should we make this a proper merge?
self.logger.warning(f'Overwriting existing library {libname} '
f'in object with values read from {filename}.')
self._import_library(libname, localcfg['library'][libname])
###########################################################################
def write_manifest(self, filename, prune=True, abspath=False, job=None):
'''
Writes the compilation manifest to a file.
The write file format is determined by the filename suffix. Currently
        json (*.json), yaml (*.yaml), tcl (*.tcl), and csv (*.csv) formats are
supported.
Args:
filename (filepath): Output filepath
            prune (bool): If True, only essential non-empty parameters from the
                Chip object schema are written to the output file.
abspath (bool): If set to True, then all schema filepaths
are resolved to absolute filepaths.
Examples:
>>> chip.write_manifest('mydump.json')
Prunes and dumps the current chip manifest into mydump.json
'''
filepath = os.path.abspath(filename)
self.logger.debug('Writing manifest to %s', filepath)
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
if prune:
self.logger.debug('Pruning dictionary before writing file %s', filepath)
# Keep empty lists to simplify TCL coding
if filepath.endswith('.tcl'):
keeplists = True
else:
keeplists = False
cfgcopy = self._prune(self.cfg, keeplists=keeplists)
else:
cfgcopy = copy.deepcopy(self.cfg)
# resolve absolute paths
if abspath:
self._abspath(cfgcopy)
is_csv = re.search(r'(\.csv)(\.gz)*$', filepath)
# format specific dumping
if filepath.endswith('.gz'):
fout = gzip.open(filepath, 'wt', encoding='UTF-8')
elif is_csv:
# Files written using csv library should be opened with newline=''
# https://docs.python.org/3/library/csv.html#id3
fout = open(filepath, 'w', newline='')
else:
fout = open(filepath, 'w')
# format specific printing
try:
if re.search(r'(\.json|\.sup)(\.gz)*$', filepath):
fout.write(json.dumps(cfgcopy, indent=4, sort_keys=True))
elif re.search(r'(\.yaml|\.yml)(\.gz)*$', filepath):
fout.write(yaml.dump(cfgcopy, Dumper=YamlIndentDumper, default_flow_style=False))
elif re.search(r'(\.tcl)(\.gz)*$', filepath):
self._print_tcl(cfgcopy, prefix="dict set sc_cfg", fout=fout)
elif is_csv:
self._print_csv(cfgcopy, fout=fout)
else:
self.logger.error('File format not recognized %s', filepath)
self.error = 1
finally:
fout.close()
###########################################################################
def check_checklist(self, standard, items=None, check_ok=False):
'''
Check items in a checklist.
Checks the status of items in a checklist for the standard provided. If
a specific list of items is unspecified, all items are checked.
All items have an associated 'task' parameter, which indicates which
tasks can be used to automatically validate the item. For an item to be
checked, all tasks must satisfy the item's criteria, unless waivers are
provided. In addition, that task must have generated EDA report files
for each metric in the criteria.
For items without an associated task, the only requirement is that at
least one report has been added to that item.
When 'check_ok' is True, every item must also have its 'ok' parameter
set to True, indicating that a human has reviewed the item.
Args:
standard (str): Standard to check.
items (list of str): Items to check from standard.
check_ok (bool): Whether to check item 'ok' parameter.
Returns:
Status of item check.
Examples:
>>> status = chip.check_checklist('iso9000', 'd000')
Returns status.
'''
self.logger.info(f'Checking checklist {standard}')
if items is None:
items = self.getkeys('checklist', standard)
flow = self.get('option', 'flow')
for item in items:
all_criteria = self.get('checklist', standard, item, 'criteria')
for criteria in all_criteria:
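                # Criteria strings have the form '<metric><op><goal>', e.g. 'errors==0'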
m = re.match(r'(\w+)([\>\=\<]+)(\w+)', criteria)
if not m:
self.logger.error(f"Illegal checklist criteria: {criteria}")
self.error = 1
return False
elif m.group(1) not in self.getkeys('metric', 'default', 'default'):
self.logger.error(f"Critera must use legal metrics only: {criteria}")
self.error = 1
return False
metric = m.group(1)
op = m.group(2)
goal = float(m.group(3))
tasks = self.get('checklist', standard, item, 'task')
for job, step, index in tasks:
# Automated checks
flow = self.get('option', 'flow', job=job)
tool = self.get('flowgraph', flow, step, index, 'tool', job=job)
value = self.get('metric', step, index, metric, job=job)
criteria_ok = self._safecompare(value, op, goal)
if metric in self.getkeys('checklist', standard, item, 'waiver'):
waivers = self.get('checklist', standard, item, 'waiver', metric)
else:
waivers = []
criteria_str = f'{metric}{op}{goal}'
if not criteria_ok and waivers:
self.logger.warning(f'{item} criteria {criteria_str} unmet by task {step}{index}, but found waivers.')
elif not criteria_ok:
self.logger.error(f'{item} criteria {criteria_str} unmet by task {step}{index}.')
self.error = 1
return False
if (step in self.getkeys('tool', tool, 'report', job=job) and
index in self.getkeys('tool', tool, 'report', step, job=job) and
metric in self.getkeys('tool', tool, 'report', step, index, job=job)):
eda_reports = self.find_files('tool', tool, 'report', step, index, metric, job=job)
else:
eda_reports = None
if not eda_reports:
self.logger.error(f'No EDA reports generated for metric {metric} in task {step}{index}')
self.error = 1
return False
for report in eda_reports:
if report not in self.get('checklist', standard, item, 'report'):
self.add('checklist', standard, item, 'report', report)
if len(self.get('checklist', standard, item, 'report')) == 0:
# TODO: validate that report exists?
self.logger.error(f'No report documenting item {item}')
self.error = 1
return False
if check_ok and not self.get('checklist', standard, item, 'ok'):
self.logger.error(f"Item {item} 'ok' field not checked")
self.error = 1
return False
self.logger.info('Check succeeded!')
return True
###########################################################################
def read_file(self, filename, step='import', index='0'):
'''
Read file defined in schema. (WIP)
'''
return(0)
###########################################################################
def update(self):
'''
Update the chip dependency graph.
1. Finds all packages in the local cache
2. Fetches all packages in the remote registry
3. Creates a dependency graph based on current chip dependencies and
dependencies read from dependency json objects.
4. If autoinstall is set, copy registry packages to local cache.
5. Error out if package is not found in local cache or in registry.
6. Error out if autoinstall is set and registry package is missing.
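        Examples:
            >>> chip.update()
            Updates the 'package', 'depgraph' parameter and returns the graph.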
'''
# schema settings
design = self.get('design')
reglist = self.get('option', 'registry')
auto = self.get('option','autoinstall')
# environment settings
# Local cache location
if 'SC_HOME' in os.environ:
home = os.environ['SC_HOME']
else:
home = os.environ['HOME']
cache = os.path.join(home,'.sc','registry')
# Indexing all local cache packages
local = self._build_index(cache)
remote = self._build_index(reglist)
# Cycle through current chip dependencies
deps = {}
for dep in self.getkeys('package', 'dependency'):
deps[dep] = self.get('package', 'dependency', dep)
depgraph = self._find_deps(cache, local, remote, design, deps, auto)
# Update dependency graph
for dep in depgraph:
self.set('package', 'depgraph', dep, depgraph[dep])
return depgraph
###########################################################################
def _build_index(self, dirlist):
'''
Build a package index for a registry.
'''
if not isinstance(dirlist, list):
dirlist = [dirlist]
index = {}
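        # index maps package name -> version -> registry directory containing it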
for item in dirlist:
if re.match(r'http', item):
#TODO
pass
else:
packages = os.listdir(item)
for i in packages:
versions = os.listdir(os.path.join(item, i))
index[i] = {}
for j in versions:
index[i][j] = item
return index
###########################################################################
def _install_package(self, cache, dep, ver, remote):
'''
Copies a package from remote to local.
        The remote and local arguments are package indices of format:
        index[dep][version] = <registry directory>
'''
package = f"{dep}-{ver}.sup.gz"
self.logger.info(f"Installing package {package} in {cache}")
# Check that package exists in remote registry
if dep in remote.keys():
if ver not in list(remote[dep].keys()):
self.logger.error(f"Package {dep}-{ver} not found in registry.")
sys.exit()
ifile = os.path.join(remote[dep][ver],dep,ver,package)
odir = os.path.join(cache,dep,ver)
ofile = os.path.join(odir,package)
# Install package
os.makedirs(odir, exist_ok=True)
shutil.copyfile(ifile, ofile)
###########################################################################
    def _find_deps(self, cache, local, remote, design, deps, auto, depgraph=None, upstream=None):
        '''
        Recursive function to find and install dependencies.
        '''
        # Use None defaults so the mutable dicts are not shared across calls
        depgraph = {} if depgraph is None else depgraph
        upstream = {} if upstream is None else upstream
# install missing dependencies
depgraph[design] = []
for dep in deps.keys():
#TODO: Proper PEP semver matching
ver = list(deps[dep])[0]
depgraph[design].append((dep,ver))
islocal = False
if dep in local.keys():
if ver in local[dep]:
islocal = True
# install and update local index
if auto and islocal:
self.logger.info(f"Found package {dep}-{ver} in cache")
elif auto and not islocal:
self._install_package(cache, dep, ver, remote)
local[dep]=ver
# look through dependency package files
package = os.path.join(cache,dep,ver,f"{dep}-{ver}.sup.gz")
if not os.path.isfile(package):
self.logger.error("Package missing. Try 'autoinstall' or install manually.")
sys.exit()
with gzip.open(package, 'r') as f:
localcfg = json.load(f)
# done if no more dependencies
if 'dependency' in localcfg['package']:
subdeps = {}
subdesign = localcfg['design']['value']
depgraph[subdesign] = []
for item in localcfg['package']['dependency'].keys():
subver = localcfg['package']['dependency'][item]['value']
if (item in upstream) and (upstream[item] == subver):
# Circular imports are not supported.
raise SiliconCompilerError(f'Cannot process circular import: {dep}-{ver} <---> {item}-{subver}.')
subdeps[item] = subver
upstream[item] = subver
depgraph[subdesign].append((item, subver))
self._find_deps(cache, local, remote, subdesign, subdeps, auto, depgraph, upstream)
return depgraph
###########################################################################
def import_library(self, lib_chip):
'''Import a Chip object into current Chip as a library.
Args:
lib_chip (Chip): An instance of Chip to import.
'''
self._import_library(lib_chip.design, lib_chip.cfg)
###########################################################################
def _import_library(self, libname, libcfg):
'''Helper to import library with config 'libconfig' as a library
'libname' in current Chip object.'''
self.cfg['library'][libname] = copy.deepcopy(libcfg)
if 'pdk' in self.cfg['library'][libname]:
del self.cfg['library'][libname]['pdk']
###########################################################################
def write_depgraph(self, filename):
'''
Writes the package dependency tree to disk.
Supported graphical render formats include png, svg, gif, pdf and a
few others. (see https://graphviz.org for more information).
Supported text formats include .md, .rst. (see the Linux 'tree'
command for more information).
'''
return(0)
###########################################################################
def write_flowgraph(self, filename, flow=None,
fillcolor='#ffffff', fontcolor='#000000',
fontsize='14', border=True, landscape=False):
'''Renders and saves the compilation flowgraph to a file.
The chip object flowgraph is traversed to create a graphviz (\*.dot)
file comprised of node, edges, and labels. The dot file is a
graphical representation of the flowgraph useful for validating the
correctness of the execution flow graph. The dot file is then
converted to the appropriate picture or drawing format based on the
filename suffix provided. Supported output render formats include
png, svg, gif, pdf and a few others. For more information about the
        graphviz project, see https://graphviz.org/
Args:
filename (filepath): Output filepath
flow (str): Name of flowgraph to render
fillcolor(str): Node fill RGB color hex value
fontcolor (str): Node font RGB color hex value
fontsize (str): Node text font size
border (bool): Enables node border if True
landscape (bool): Renders graph in landscape layout if True
Examples:
>>> chip.write_flowgraph('mydump.png')
Renders the object flowgraph and writes the result to a png file.
'''
filepath = os.path.abspath(filename)
self.logger.debug('Writing flowgraph to file %s', filepath)
fileroot, ext = os.path.splitext(filepath)
fileformat = ext.replace(".", "")
if flow is None:
flow = self.get('option', 'flow')
# controlling border width
if border:
penwidth = '1'
else:
penwidth = '0'
# controlling graph direction
if landscape:
rankdir = 'LR'
else:
rankdir = 'TB'
dot = graphviz.Digraph(format=fileformat)
dot.graph_attr['rankdir'] = rankdir
dot.attr(bgcolor='transparent')
for step in self.getkeys('flowgraph',flow):
irange = 0
for index in self.getkeys('flowgraph', flow, step):
irange = irange +1
for i in range(irange):
index = str(i)
node = step+index
# create step node
tool = self.get('flowgraph', flow, step, index, 'tool')
if tool in self.builtin:
labelname = step
elif tool is not None:
labelname = f"{step}{index}\n({tool})"
else:
labelname = f"{step}{index}"
dot.node(node, label=labelname, bordercolor=fontcolor, style='filled',
fontcolor=fontcolor, fontsize=fontsize, ordering="in",
penwidth=penwidth, fillcolor=fillcolor)
# get inputs
all_inputs = []
for in_step, in_index in self.get('flowgraph', flow, step, index, 'input'):
all_inputs.append(in_step + in_index)
for item in all_inputs:
dot.edge(item, node)
dot.render(filename=fileroot, cleanup=True)
########################################################################
def _collect_paths(self):
'''
Returns list of paths to files that will be collected by import step.
See docstring for _collect() for more details.
'''
paths = []
copyall = self.get('option', 'copyall')
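        # A file parameter is collected when its 'copy' field is set or when
        # the global 'copyall' option is enabled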
allkeys = self.getkeys()
for key in allkeys:
leaftype = self.get(*key, field='type')
if re.search('file', leaftype):
copy = self.get(*key, field='copy')
value = self.get(*key)
if copyall or copy:
for item in value:
paths.append(item)
return paths
########################################################################
def _collect(self, step, index):
'''
Collects files found in the configuration dictionary and places
them in inputs/. The function only copies in files that have the 'copy'
field set as true. If 'copyall' is set to true, then all files are
copied in.
1. indexing like in run, job1
2. chdir package
3. run tool to collect files, pickle file in output/design.v
4. copy in rest of the files below
5. record files read in to schema
'''
indir = 'inputs'
flow = self.get('option', 'flow')
if not os.path.exists(indir):
os.makedirs(indir)
self.logger.info('Collecting input sources')
for path in self._collect_paths():
filename = self._get_imported_filename(path)
abspath = self._find_sc_file(path)
if abspath:
self.logger.info(f"Copying {abspath} to '{indir}' directory")
shutil.copy(abspath, os.path.join(indir, filename))
else:
self._haltstep(step, index)
outdir = 'outputs'
if not os.path.exists(outdir):
os.makedirs(outdir)
# Logic to make links from outputs/ to inputs/, skipping anything that
# will be output by the tool as well as the manifest. We put this here
# so that tools used for the import stage don't have to duplicate this
# logic. We skip this logic for 'join'-based single-step imports, since
# 'join' does the copy for us.
tool = self.get('flowgraph', flow, step, index, 'tool')
if tool not in self.builtin:
if self.valid('tool', tool, 'output', step, index):
outputs = self.get('tool', tool, 'output', step, index)
else:
outputs = []
design = self.get('design')
ignore = outputs + [f'{design}.pkg.json']
utils.copytree(indir, outdir, dirs_exist_ok=True, link=True, ignore=ignore)
elif tool not in ('join', 'nop'):
self.error = 1
            self.logger.error(f'Invalid import step builtin {tool}. Must be a tool, join, or nop.')
###########################################################################
def archive(self, step=None, index=None, all_files=False):
'''Archive a job directory.
Creates a single compressed archive (.tgz) based on the design,
jobname, and flowgraph in the current chip manifest. Individual
        steps and/or indices can be archived based on arguments specified.
By default, all steps and indices in the flowgraph are archived.
By default, only the outputs directory content and the log file
are archived.
Args:
step(str): Step to archive.
index (str): Index to archive
all_files (bool): If True, all files are archived.
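        Examples:
            >>> chip.archive()
            Archives outputs and logs for all steps in the current flowgraph.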
'''
design = self.get('design')
jobname = self.get('option', 'jobname')
buildpath = self.get('option', 'builddir')
flow = self.get('option', 'flow')
if step:
steplist = [step]
elif self.get('arg', 'step'):
steplist = [self.get('arg', 'step')]
elif self.get('option', 'steplist'):
steplist = self.get('option', 'steplist')
else:
steplist = self.list_steps()
if step:
archive_name = f"{design}_{jobname}_{step}.tgz"
else:
archive_name = f"{design}_{jobname}.tgz"
with tarfile.open(archive_name, "w:gz") as tar:
for step in steplist:
if index:
indexlist = [index]
else:
indexlist = self.getkeys('flowgraph', flow, step)
for item in indexlist:
basedir = os.path.join(buildpath, design, jobname, step, item)
if all_files:
tar.add(os.path.abspath(basedir), arcname=basedir)
else:
outdir = os.path.join(basedir,'outputs')
logfile = os.path.join(basedir, step+'.log')
tar.add(os.path.abspath(outdir), arcname=outdir)
if os.path.isfile(logfile):
tar.add(os.path.abspath(logfile), arcname=logfile)
return archive_name
###########################################################################
def hash_files(self, *keypath, algo='sha256', update=True):
'''Generates hash values for a list of parameter files.
        Generates a hash value for each file found in the keypath.
        If the update variable is True, the hash values are recorded in the
'filehash' field of the parameter, following the order dictated by
the files within the 'values' parameter field.
Files are located using the find_files() function.
        The file hash calculation is performed based on the 'algo' setting.
Supported algorithms include SHA1, SHA224, SHA256, SHA384, SHA512,
and MD5.
Args:
*keypath(str): Keypath to parameter.
algo (str): Algorithm to use for file hash calculation
update (bool): If True, the hash values are recorded in the
chip object manifest.
Returns:
A list of hash values.
Examples:
>>> hashlist = hash_files('sources')
Hashlist gets list of hash values computed from 'sources' files.
'''
keypathstr = ','.join(keypath)
#TODO: Insert into find_files?
if 'file' not in self.get(*keypath, field='type'):
self.logger.error(f"Illegal attempt to hash non-file parameter [{keypathstr}].")
self.error = 1
else:
filelist = self.find_files(*keypath)
#cycle through all paths
hashlist = []
if filelist:
self.logger.info(f'Computing hash value for [{keypathstr}]')
for filename in filelist:
if os.path.isfile(filename):
#TODO: Implement algo selection
hashobj = hashlib.sha256()
with open(filename, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
hashobj.update(byte_block)
hash_value = hashobj.hexdigest()
hashlist.append(hash_value)
else:
self.error = 1
self.logger.info(f"Internal hashing error, file not found")
# compare previous hash to new hash
oldhash = self.get(*keypath,field='filehash')
for i,item in enumerate(oldhash):
if item != hashlist[i]:
self.logger.error(f"Hash mismatch for [{keypath}]")
self.error = 1
self.set(*keypath, hashlist, field='filehash', clobber=True)
###########################################################################
def audit_manifest(self):
'''Verifies the integrity of the post-run compilation manifest.
Checks the integrity of the chip object implementation flow after
the run() function has been completed. Errors, warnings, and debug
messages are reported through the logger object.
Audit checks performed include:
* Time stamps
* File modifications
* Error and warning policy
* IP and design origin
* User access
* License terms
* Version checks
Returns:
Returns True if the manifest has integrity, else returns False.
Example:
>>> chip.audit_manifest()
Audits the Chip object manifest and returns 0 if successful.
'''
return 0
###########################################################################
def calc_area(self):
'''Calculates the area of a rectilinear diearea.
        Uses the shoelace formula to calculate the design area using
        the (x,y) point tuples from the 'diearea' parameter. If the diearea
        parameter only contains two points, then the first and second point
        must be the lower left and upper right points of the rectangle.
(Ref: https://en.wikipedia.org/wiki/Shoelace_formula)
Returns:
Design area (float).
Examples:
>>> area = chip.calc_area()
'''
vertices = self.get('asic', 'diearea')
if len(vertices) == 2:
width = vertices[1][0] - vertices[0][0]
height = vertices[1][1] - vertices[0][1]
area = width * height
else:
area = 0.0
for i in range(len(vertices)):
j = (i + 1) % len(vertices)
area += vertices[i][0] * vertices[j][1]
area -= vertices[j][0] * vertices[i][1]
area = abs(area) / 2
return area
###########################################################################
def calc_yield(self, model='poisson'):
'''Calculates raw die yield.
Calculates the raw yield of the design as a function of design area
and d0 defect density. Calculation can be done based on the poisson
model (default) or the murphy model. The die area and the d0
parameters are taken from the chip dictionary.
* Poisson model: dy = exp(-area * d0/100).
* Murphy model: dy = ((1-exp(-area * d0/100))/(area * d0/100))^2.
Args:
model (string): Model to use for calculation (poisson or murphy)
Returns:
Design yield percentage (float).
Examples:
>>> yield = chip.calc_yield()
Yield variable gets yield value based on the chip manifest.
'''
d0 = self.get('pdk', 'd0')
diearea = self.calc_area()
if model == 'poisson':
dy = math.exp(-diearea * d0/100)
elif model == 'murphy':
dy = ((1-math.exp(-diearea * d0/100))/(diearea * d0/100))**2
return dy
##########################################################################
def calc_dpw(self):
'''Calculates dies per wafer.
Calculates the gross dies per wafer based on the design area, wafersize,
wafer edge margin, and scribe lines. The calculation is done by starting
at the center of the wafer and placing as many complete design
footprints as possible within a legal placement area.
Returns:
Number of gross dies per wafer (int).
Examples:
>>> dpw = chip.calc_dpw()
Variable dpw gets gross dies per wafer value based on the chip manifest.
'''
#PDK information
wafersize = self.get('pdk', 'wafersize')
edgemargin = self.get('pdk', 'edgemargin')
hscribe = self.get('pdk', 'hscribe')
vscribe = self.get('pdk', 'vscribe')
#Design parameters
diesize = self.get('asic', 'diesize').split()
        diewidth = (float(diesize[2]) - float(diesize[0]))/1000
        dieheight = (float(diesize[3]) - float(diesize[1]))/1000
#Derived parameters
radius = wafersize/2 -edgemargin
stepwidth = (diewidth + hscribe)
stepheight = (dieheight + vscribe)
#Raster dies out from center until you touch edge margin
#Work quadrant by quadrant
dies = 0
for quad in ('q1', 'q2', 'q3', 'q4'):
x = 0
y = 0
if quad == "q1":
xincr = stepwidth
yincr = stepheight
elif quad == "q2":
xincr = -stepwidth
yincr = stepheight
elif quad == "q3":
xincr = -stepwidth
yincr = -stepheight
elif quad == "q4":
xincr = stepwidth
yincr = -stepheight
#loop through all y values from center
while math.hypot(0, y) < radius:
y = y + yincr
while math.hypot(x, y) < radius:
x = x + xincr
dies = dies + 1
x = 0
return int(dies)
###########################################################################
def grep(self, args, line):
"""
Emulates the Unix grep command on a string.
Emulates the behavior of the Unix grep command that is etched into
our muscle memory. Partially implemented, not all features supported.
The function returns None if no match is found.
Args:
arg (string): Command line arguments for grep command
line (string): Line to process
Returns:
Result of grep command (string).
"""
# Quick return if input is None
if line is None:
return None
# Partial list of supported grep options
options = {
'-v' : False, # Invert the sense of matching
'-i' : False, # Ignore case distinctions in patterns and data
'-E' : False, # Interpret PATTERNS as extended regular expressions.
'-e' : False, # Safe interpretation of pattern starting with "-"
'-x' : False, # Select only matches that exactly match the whole line.
'-o' : False, # Print only the match parts of a matching line
'-w' : False} # Select only lines containing matches that form whole words.
# Split into repeating switches and everything else
match = re.match(r'\s*((?:\-\w\s)*)(.*)', args)
pattern = match.group(2)
# Split space separated switch string into list
switches = match.group(1).strip().split(' ')
# Find special -e switch update the pattern
for i in range(len(switches)):
if switches[i] == "-e":
if i != (len(switches)):
pattern = ' '.join(switches[i+1:]) + " " + pattern
switches = switches[0:i+1]
break
options["-e"] = True
elif switches[i] in options.keys():
options[switches[i]] = True
elif switches[i] !='':
print("ERROR",switches[i])
#REGEX
        #TODO: add all the other options
match = re.search(rf"({pattern})", line)
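        # Return the line when the match result differs from the -v (invert)
        # flag: a match without -v, or no match with -v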
if bool(match) == bool(options["-v"]):
return None
else:
return line
###########################################################################
def check_logfile(self, jobname=None, step=None, index='0',
logfile=None, display=True):
'''
Checks logfile for patterns found in the 'regex' parameter.
Reads the content of the step's log file and compares the
content found in step 'regex' parameter. The matches are
        stored in the file '<step>.<suffix>' in the run directory.
The matches are printed to STDOUT if display is set to True.
Args:
step (str): Task step name ('syn', 'place', etc)
jobname (str): Jobid directory name
index (str): Task index
            display (bool): If True, prints matches to STDOUT.
Examples:
>>> chip.check_logfile('place')
Searches for regex matches in the place logfile.
'''
# Using manifest to get defaults
flow = self.get('option', 'flow')
if jobname is None:
jobname = self.get('option', 'jobname')
        if step is None:
            step = self.get('arg', 'step')
        if index is None:
            index = self.getkeys('flowgraph', flow, step)[0]
        if logfile is None:
            logfile = f"{step}.log"
tool = self.get('flowgraph', flow, step, index, 'tool')
# Creating local dictionary (for speed)
# self.get is slow
checks = {}
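        # checks[suffix] maps each regex suffix to its open report file and the
        # grep-style argument list from the tool's 'regex' parameter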
regex_list = []
if self.valid('tool', tool, 'regex', step, index, 'default'):
regex_list = self.getkeys('tool', tool, 'regex', step, index)
for suffix in regex_list:
checks[suffix] = {}
checks[suffix]['report'] = open(f"{step}.{suffix}", "w")
checks[suffix]['args'] = self.get('tool', tool, 'regex', step, index, suffix)
# Looping through patterns for each line
with open(logfile) as f:
for line in f:
for suffix in checks:
string = line
for item in checks[suffix]['args']:
if string is None:
break
else:
string = self.grep(item, string)
if string is not None:
#always print to file
print(string.strip(), file=checks[suffix]['report'])
#selectively print to display
if display:
self.logger.info(string.strip())
###########################################################################
def _find_leaves(self, steplist):
'''Helper to find final (leaf) tasks for a given steplist.'''
flow = self.get('option', 'flow')
# First, iterate over the tasks to generate a set of non-leaf tasks.
all_tasks = set()
non_leaf_tasks = set()
for step in steplist:
for index in self.getkeys('flowgraph', flow, step):
all_tasks.add((step, index))
for in_step, in_index in self.get('flowgraph', flow, step, index, 'input'):
if in_step in steplist:
non_leaf_tasks.add((in_step, in_index))
# Then, find all leaf tasks by elimination.
return all_tasks.difference(non_leaf_tasks)
###########################################################################
def summary(self, steplist=None, show_all_indices=False):
'''
Prints a summary of the compilation manifest.
Metrics from the flowgraph steps, or steplist parameter if
defined, are printed out on a per step basis. All metrics from the
metric dictionary with weights set in the flowgraph dictionary are
printed out.
Args:
show_all_indices (bool): If True, displays metrics for all indices
of each step. If False, displays metrics only for winning
indices.
Examples:
>>> chip.summary()
Prints out a summary of the run to stdout.
'''
# display whole flowgraph if no steplist specified
flow = self.get('option', 'flow')
if not steplist:
if self.get('option', 'steplist'):
steplist = self.get('option', 'steplist')
else:
steplist = self.list_steps()
# Find all tasks that are part of a "winning" path.
selected_tasks = set()
to_search = []
# Start search with any successful leaf tasks.
leaf_tasks = self._find_leaves(steplist)
for task in leaf_tasks:
if self.get('flowgraph', flow, *task, 'status') == TaskStatus.SUCCESS:
selected_tasks.add(task)
to_search.append(task)
# Search backwards, saving anything that was selected by leaf tasks.
while len(to_search) > 0:
task = to_search.pop(-1)
for selected in self.get('flowgraph', flow, *task, 'select'):
if selected not in selected_tasks:
selected_tasks.add(selected)
to_search.append(selected)
# only report tool based steps functions
for step in steplist.copy():
if self.get('flowgraph',flow, step,'0','tool') in self.builtin:
index = steplist.index(step)
del steplist[index]
# job directory
jobdir = self._getworkdir()
# Custom reporting modes
paramlist = []
for item in self.getkeys('option', 'param'):
paramlist.append(item+"="+self.get('option', 'param', item))
if paramlist:
paramstr = ', '.join(paramlist)
else:
paramstr = "None"
info_list = ["SUMMARY:\n",
"design : " + self.get('design'),
"params : " + paramstr,
"jobdir : "+ jobdir,
]
if self.get('option', 'mode') == 'asic':
pdk = self.get('option', 'pdk')
info_list.extend(["foundry : " + self.get('pdk', pdk, 'foundry'),
"process : " + pdk,
"targetlibs : "+" ".join(self.get('asic', 'logiclib'))])
elif self.get('option', 'mode') == 'fpga':
info_list.extend(["partname : "+self.get('fpga','partname')])
info = '\n'.join(info_list)
print("-"*135)
print(info, "\n")
# Stepping through all steps/indices and printing out metrics
data = []
#Creating Header
header = []
indices_to_show = {}
colwidth = 8
for step in steplist:
if show_all_indices:
indices_to_show[step] = self.getkeys('flowgraph', flow, step)
else:
indices_to_show[step] = []
for index in self.getkeys('flowgraph', flow, step):
if (step, index) in selected_tasks:
indices_to_show[step].append(index)
# header for data frame
for step in steplist:
for index in indices_to_show[step]:
header.append(f'{step}{index}'.center(colwidth))
# figure out which metrics have non-zero weights
metric_list = []
for step in steplist:
for metric in self.getkeys('metric','default','default'):
if metric in self.getkeys('flowgraph', flow, step, '0', 'weight'):
if self.get('flowgraph', flow, step, '0', 'weight', metric) is not None:
if metric not in metric_list:
metric_list.append(metric)
# print out all metrics
metrics = []
for metric in metric_list:
metrics.append(" " + metric)
row = []
for step in steplist:
for index in indices_to_show[step]:
value = self.get('metric', step, index, metric)
if value is None:
value = 'ERR'
else:
value = str(value)
row.append(" " + value.center(colwidth))
data.append(row)
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
pandas.set_option('display.width', 100)
df = pandas.DataFrame(data, metrics, header)
print(df.to_string())
print("-"*135)
# Create a report for the Chip object which can be viewed in a web browser.
# Place report files in the build's root directory.
web_dir = os.path.join(self.get('option', 'builddir'),
self.get('design'),
self.get('option', 'jobname'))
if os.path.isdir(web_dir):
# Gather essential variables.
templ_dir = os.path.join(self.scroot, 'templates', 'report')
design = self.get('design')
flow = self.get('option', 'flow')
flow_steps = steplist
flow_tasks = {}
for step in flow_steps:
flow_tasks[step] = self.getkeys('flowgraph', flow, step)
# Call 'show()' to generate a low-res PNG of the design.
results_gds = self.find_result('gds', step='export')
img_data = None
if results_gds and not self.get('option', 'nodisplay'):
self.show(results_gds,
['-rd', 'screenshot=1', '-rd', 'scr_w=1024', '-rd', 'scr_h=1024', '-z'])
result_file = os.path.join(web_dir, f'{design}.png')
# Result might not exist if there is no display
if os.path.isfile(result_file):
with open(result_file, 'rb') as img_file:
img_data = base64.b64encode(img_file.read()).decode('utf-8')
# Generate results page by passing the Chip manifest into the Jinja2 template.
env = Environment(loader=FileSystemLoader(templ_dir))
results_page = os.path.join(web_dir, 'report.html')
pruned_cfg = self._prune(self.cfg)
if 'history' in pruned_cfg:
del pruned_cfg['history']
if 'library' in pruned_cfg:
del pruned_cfg['library']
with open(results_page, 'w') as wf:
wf.write(env.get_template('sc_report.j2').render(
manifest = self.cfg,
pruned_cfg = pruned_cfg,
metric_keys = metric_list,
metrics = self.cfg['metric'],
tasks = flow_tasks,
img_data = img_data,
))
# Try to open the results and layout only if '-nodisplay' is not set.
if not self.get('option', 'nodisplay'):
try:
                    webbrowser.open(results_page)
except webbrowser.Error:
# Python 'webbrowser' module includes a limited number of popular defaults.
# Depending on the platform, the user may have defined their own with $BROWSER.
if 'BROWSER' in os.environ:
subprocess.run([os.environ['BROWSER'], results_page])
else:
self.logger.warning('Unable to open results page in web browser:\n' +
os.path.abspath(os.path.join(web_dir, "report.html")))
###########################################################################
def list_steps(self, flow=None):
'''
Returns an ordered list of flowgraph steps.
All step keys from the flowgraph dictionary are collected and the
distance from the root node (ie. without any inputs defined) is
measured for each step. The step list is then sorted based on
the distance from root and returned.
Returns:
A list of steps sorted by distance from the root node.
Example:
>>> steplist = chip.list_steps()
Variable steplist gets list of steps sorted by distance from root.
'''
if flow is None:
flow = self.get('option', 'flow')
#Get length of paths from step to root
depth = {}
for step in self.getkeys('flowgraph', flow):
depth[step] = 0
for path in self._allpaths(self.cfg, flow, step, str(0)):
                if len(path) > depth[step]:
                    depth[step] = len(path)
        #Sort steps based on path lengths
sorted_dict = dict(sorted(depth.items(), key=lambda depth: depth[1]))
return list(sorted_dict.keys())
###########################################################################
def _allpaths(self, cfg, flow, step, index, path=None):
'''Recursive helper for finding all paths from provided step, index to
root node(s) with no inputs.
Returns a list of lists.
'''
if path is None:
path = []
inputs = self.get('flowgraph', flow, step, index, 'input', cfg=cfg)
if not self.get('flowgraph', flow, step, index, 'input', cfg=cfg):
return [path]
else:
allpaths = []
for in_step, in_index in inputs:
newpath = path.copy()
newpath.append(in_step + in_index)
allpaths.extend(self._allpaths(cfg, flow, in_step, in_index, path=newpath))
return allpaths
###########################################################################
def clock(self, pin, period, jitter=0):
"""
Clock configuration helper function.
A utility function for setting all parameters associated with a
single clock definition in the schema.
The method modifies the following schema parameters:
        ['datasheet', design, 'pin', pin, 'type', 'global']
        ['datasheet', design, 'pin', pin, 'tperiod', 'global']
        ['datasheet', design, 'pin', pin, 'tjitter', 'global']
Args:
            pin (str): Full hierarchical path to clk pin.
period (float): Clock period specified in ns.
jitter (float): Clock jitter specified in ns.
Examples:
            >>> chip.clock('clk', period=1.0)
Create a clock named 'clk' with a 1.0ns period.
"""
design = self.get('design')
self.set('datasheet', design, 'pin', pin, 'type', 'global', 'clk')
period_range = (period * 1e-9, period * 1e-9, period * 1e-9)
self.set('datasheet', design, 'pin', pin, 'tperiod', 'global', period_range)
jitter_range = (jitter * 1e-9, jitter * 1e-9, jitter * 1e-9)
self.set('datasheet', design, 'pin', pin, 'tjitter', 'global', jitter_range)
###########################################################################
def node(self, flow, step, tool, index=0):
'''
Creates a flowgraph node.
Creates a flowgraph node by binding a tool to a task. A task is defined
as the combination of a step and index. A tool can be an external
        executable or one of the built-in functions in the SiliconCompiler
        framework. Built-in functions include: minimum, maximum, join, mux,
verify.
The method modifies the following schema parameters:
['flowgraph', flow, step, index, 'tool', tool]
['flowgraph', flow, step, index, 'weight', metric]
Args:
flow (str): Flow name
step (str): Task step name
tool (str): Tool (or builtin function) to associate with task.
index (int): Task index
Examples:
>>> chip.node('asicflow', 'place', 'openroad', index=0)
Creates a task with step='place' and index=0 and binds it to the 'openroad' tool.
'''
# bind tool to node
self.set('flowgraph', flow, step, str(index), 'tool', tool)
# set default weights
for metric in self.getkeys('metric', 'default', 'default'):
self.set('flowgraph', flow, step, str(index), 'weight', metric, 0)
###########################################################################
def edge(self, flow, tail, head, tail_index=0, head_index=0):
'''
Creates a directed edge from a tail node to a head node.
Connects the output of a tail node with the input of a head node by
setting the 'input' field of the head node in the schema flowgraph.
The method modifies the following parameters:
['flowgraph', flow, head, str(head_index), 'input']
Args:
flow (str): Name of flow
tail (str): Name of tail node
head (str): Name of head node
tail_index (int): Index of tail node to connect
head_index (int): Index of head node to connect
Examples:
>>> chip.edge('place', 'cts')
Creates a directed edge from place to cts.
'''
# Handling connecting edges between graphs
# Not completely name space safe, but feels like this limitation
# is a non-issue
module_tail = f"{tail}.export"
module_head = f"{head}.import"
if module_tail in self.getkeys('flowgraph',flow):
tail = module_tail
if module_head in self.getkeys('flowgraph',flow):
head = module_head
#TODO: add error checking
# Adding
self.add('flowgraph', flow, head, str(head_index), 'input', (tail, str(tail_index)))
###########################################################################
def graph(self, flow, subflow, name=None):
'''
Instantiates a named flow as a graph in the current flowgraph.
Args:
flow (str): Name of current flow.
subflow (str): Name of flow to instantiate
name (str): Name of instance
Examples:
            >>> chip.graph('topflow', 'asicflow')
            Instantiates the 'asicflow' flowgraph within the 'topflow' flowgraph
            (illustrative flow names).
'''
if flow not in self.getkeys('flowgraph'):
self.cfg['flowgraph'][flow] ={}
# uniquify each step
for step in self.getkeys('flowgraph',subflow):
if name is None:
newstep = step
else:
newstep = name + "." + step
if newstep not in self.getkeys('flowgraph', flow):
self.cfg['flowgraph'][flow][newstep] ={}
# recursive copy
for key in self._allkeys(self.cfg['flowgraph'][subflow][step]):
self._copyparam(self.cfg['flowgraph'][subflow][step],
self.cfg['flowgraph'][flow][newstep],
key)
# update step names
for index in self.getkeys('flowgraph', flow, newstep):
all_inputs = self.get('flowgraph', flow, newstep, index,'input')
self.set('flowgraph', flow, newstep, index,'input',[])
                for in_step, in_index in all_inputs:
                    # Guard unnamed instances so None is not concatenated with the step name
                    newin = in_step if name is None else name + "." + in_step
                    self.add('flowgraph', flow, newstep, index,'input',(newin,in_index))
###########################################################################
def pipe(self, flow, plan):
'''
        Creates a pipeline based on an ordered list of key-value pairs.
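        Examples:
            >>> chip.pipe('myflow', [{'import': 'surelog'}, {'syn': 'yosys'}])
            Creates a linear flow 'myflow' with an edge from the import step to
            the syn step (illustrative step/tool names).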
'''
for item in plan:
step = list(item.keys())[0]
tool = list(item.values())[0]
self.node(flow, step, tool)
if step != 'import':
self.edge(flow, prevstep, step)
prevstep = step
###########################################################################
def join(self, *tasks):
'''
Merges outputs from a list of input tasks.
Args:
tasks(list): List of input tasks specified as (step,index) tuples.
Returns:
Input list
Examples:
            >>> select = chip.join(('lvs','0'), ('drc','0'))
Select gets the list [('lvs','0'), ('drc','0')]
'''
tasklist = list(tasks)
sel_inputs = tasklist
# no score for join, so just return 0
return sel_inputs
###########################################################################
def nop(self, *task):
'''
A no-operation that passes inputs to outputs.
Args:
task(list): Input task specified as a (step,index) tuple.
Returns:
Input task
Examples:
>>> select = chip.nop(('lvs','0'))
            Select gets the list [('lvs','0')]
'''
return list(task)
###########################################################################
def minimum(self, *tasks):
'''
Selects the task with the minimum metric score from a list of inputs.
Sequence of operation:
1. Check list of input tasks to see if all metrics meets goals
2. Check list of input tasks to find global min/max for each metric
3. Select MIN value if all metrics are met.
4. Normalize the min value as sel = (val - MIN) / (MAX - MIN)
5. Return normalized value and task name
Meeting metric goals takes precedence over compute metric scores.
Only goals with values set and metrics with weights set are considered
in the calculation.
Args:
tasks(list): List of input tasks specified as (step,index) tuples.
Returns:
tuple containing
- score (float): Minimum score
- task (tuple): Task with minimum score
Examples:
            >>> (score, task) = chip.minimum(('place','0'), ('place','1'))
'''
return self._minmax(*tasks, op="minimum")
###########################################################################
def maximum(self, *tasks):
'''
Selects the task with the maximum metric score from a list of inputs.
Sequence of operation:
1. Check list of input tasks to see if all metrics meets goals
2. Check list of input tasks to find global min/max for each metric
3. Select MAX value if all metrics are met.
        4. Normalize the value as sel = (val - MIN) / (MAX - MIN)
5. Return normalized value and task name
Meeting metric goals takes precedence over compute metric scores.
Only goals with values set and metrics with weights set are considered
in the calculation.
Args:
tasks(list): List of input tasks specified as (step,index) tuples.
Returns:
tuple containing
- score (float): Maximum score.
            - task (tuple): Task with maximum score
        Examples:
            >>> (score, task) = chip.maximum(('place','0'), ('place','1'))
'''
return self._minmax(*tasks, op="maximum")
###########################################################################
def _minmax(self, *steps, op="minimum", **selector):
'''
Shared function used for min and max calculation.
'''
if op not in ('minimum', 'maximum'):
raise ValueError('Invalid op')
flow = self.get('option', 'flow')
steplist = list(steps)
# Keeping track of the steps/indexes that have goals met
failed = {}
for step, index in steplist:
if step not in failed:
failed[step] = {}
failed[step][index] = False
if self.get('flowgraph', flow, step, index, 'status') == TaskStatus.ERROR:
failed[step][index] = True
else:
for metric in self.getkeys('metric', step, index):
if self.valid('flowgraph', flow, step, index, 'goal', metric):
goal = self.get('flowgraph', flow, step, index, 'goal', metric)
real = self.get('metric', step, index, metric)
if abs(real) > goal:
self.logger.warning(f"Step {step}{index} failed "
f"because it didn't meet goals for '{metric}' "
"metric.")
failed[step][index] = True
# Calculate max/min values for each metric
max_val = {}
min_val = {}
for metric in self.getkeys('flowgraph', flow, step, '0', 'weight'):
max_val[metric] = 0
min_val[metric] = float("inf")
for step, index in steplist:
if not failed[step][index]:
real = self.get('metric', step, index, metric)
max_val[metric] = max(max_val[metric], real)
min_val[metric] = min(min_val[metric], real)
# Select the minimum index
best_score = float('inf') if op == 'minimum' else float('-inf')
winner = None
for step, index in steplist:
if failed[step][index]:
continue
score = 0.0
for metric in self.getkeys('flowgraph', flow, step, index, 'weight'):
weight = self.get('flowgraph', flow, step, index, 'weight', metric)
if not weight:
# skip if weight is 0 or None
continue
real = self.get('metric', step, index, metric)
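                # Normalize each metric to [0, 1] across the candidate tasks
                # before weighting; if all values are equal, use the raw value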
if not (max_val[metric] - min_val[metric]) == 0:
scaled = (real - min_val[metric]) / (max_val[metric] - min_val[metric])
else:
scaled = max_val[metric]
score = score + scaled * weight
if ((op == 'minimum' and score < best_score) or
(op == 'maximum' and score > best_score)):
best_score = score
winner = (step,index)
return (best_score, winner)
###########################################################################
def verify(self, *tasks, **assertion):
'''
Tests an assertion on a list of input tasks.
The provided steplist is verified to ensure that all assertions
are True. If any of the assertions fail, False is returned.
Assertions are passed in as kwargs, with the key being a metric
and the value being a number and an optional conditional operator.
The allowed conditional operators are: >, <, >=, <=
Args:
*steps (str): List of steps to verify
**assertion (str='str'): Assertion to check on metric
Returns:
True if all assertions hold True for all steps.
Example:
            >>> ok = chip.verify(('drc','0'), ('lvs','0'), errors=0)
            ok is True if the error metric in the drc and lvs tasks is 0.
'''
#TODO: implement
return True
###########################################################################
def mux(self, *tasks, **selector):
'''
Selects a task from a list of inputs.
The selector criteria provided is used to create a custom function
for selecting the best step/index pair from the inputs. Metrics and
weights are passed in and used to select the step/index based on
the minimum or maximum score depending on the 'op' argument.
The function can be used to bypass the flows weight functions for
the purpose of conditional flow execution and verification.
Args:
*steps (str): List of steps to verify
**selector: Key value selection criteria.
Returns:
            Selected task as a (step, index) tuple.
Example:
>>> sel_stepindex = chip.mux(['route'], wirelength=0)
Selects the routing stepindex with the shortest wirelength.
'''
#TODO: modify the _minmax function to feed in alternate weight path
return None
###########################################################################
def _runtask(self, step, index, status):
'''
Private per step run method called by run().
The method takes in a step string and index string to indicated what
to run.
Execution flow:
T1. Start wall timer
T2. Defer job to compute node if using job scheduler
T3. Set up working directory + chdir
        T4. Merge manifests from all input dependencies
T5. Write manifest to input directory for convenience
T6. Reset all metrics to 0 (consider removing)
T7. Select inputs
T8. Copy data from previous step outputs into inputs
T9. Check manifest
T10. Run pre_process() function
T11. Set environment variables
T12. Check EXE version
T13. Save manifest as TCL/YAML
T14. Start CPU timer
T15. Run EXE
T16. stop CPU timer
T17. Run post_process()
T18. Check log file
T19. Hash all task files
T20. Stop Wall timer
T21. Make a task record
T22. Save manifest to disk
T23. Halt if any errors found
T24. Clean up
T25. chdir
Note that since _runtask occurs in its own process with a separate
address space, any changes made to the `self` object will not
be reflected in the parent. We rely on reading/writing the chip manifest
to the filesystem to communicate updates between processes.
'''
self._init_logger(step, index, in_run=True)
##################
# Shared parameters (long function!)
design = self.get('design')
flow = self.get('option', 'flow')
tool = self.get('flowgraph', flow, step, index, 'tool')
quiet = self.get('option', 'quiet') and (step not in self.get('option', 'bkpt'))
##################
# 1. Start wall timer
wall_start = time.time()
##################
# 2. Defer job to compute node
# If the job is configured to run on a cluster, collect the schema
# and send it to a compute node for deferred execution.
# (Run the initial 'import' stage[s] locally)
if self.get('option', 'jobscheduler') and \
self.get('flowgraph', flow, step, index, 'input'):
# Note: The _deferstep method blocks until the compute node
# finishes processing this step, and it sets the active/error bits.
_deferstep(self, step, index, status)
return
##################
# 3. Directory setup
# support for sharing data across jobs
job = self.get('option', 'jobname')
in_job = job
if step in self.getkeys('option', 'jobinput'):
if index in self.getkeys('option', 'jobinput', step):
in_job = self.get('option', 'jobinput', step, index)
workdir = self._getworkdir(step=step,index=index)
cwd = os.getcwd()
if os.path.isdir(workdir):
shutil.rmtree(workdir)
os.makedirs(workdir, exist_ok=True)
os.chdir(workdir)
os.makedirs('outputs', exist_ok=True)
os.makedirs('reports', exist_ok=True)
##################
        # 4. Merge manifests from all input dependencies
all_inputs = []
if not self.get('option', 'remote'):
for in_step, in_index in self.get('flowgraph', flow, step, index, 'input'):
in_task_status = status[in_step + in_index]
self.set('flowgraph', flow, in_step, in_index, 'status', in_task_status)
if in_task_status != TaskStatus.ERROR:
cfgfile = f"../../../{in_job}/{in_step}/{in_index}/outputs/{design}.pkg.json"
self._read_manifest(cfgfile, clobber=False, partial=True)
##################
# 5. Write manifest prior to step running into inputs
self.set('arg', 'step', None, clobber=True)
self.set('arg', 'index', None, clobber=True)
os.makedirs('inputs', exist_ok=True)
#self.write_manifest(f'inputs/{design}.pkg.json')
##################
# 6. Make metrics zero
# TODO: There should be no need for this, but need to fix
# without it we need to be more careful with flows to make sure
# things like the builtin functions don't look at None values
for metric in self.getkeys('metric', 'default', 'default'):
self.set('metric', step, index, metric, 0)
##################
# 7. Select inputs
args = self.get('flowgraph', flow, step, index, 'args')
inputs = self.get('flowgraph', flow, step, index, 'input')
sel_inputs = []
score = 0
if tool in self.builtin:
self.logger.info(f"Running built in task '{tool}'")
# Figure out which inputs to select
if tool == 'minimum':
(score, sel_inputs) = self.minimum(*inputs)
elif tool == "maximum":
(score, sel_inputs) = self.maximum(*inputs)
elif tool == "mux":
(score, sel_inputs) = self.mux(*inputs, selector=args)
elif tool == "join":
sel_inputs = self.join(*inputs)
elif tool == "verify":
if not self.verify(*inputs, assertion=args):
self._haltstep(step, index)
else:
sel_inputs = self.get('flowgraph', flow, step, index, 'input')
        if sel_inputs is None:
self.logger.error(f'No inputs selected after running {tool}')
self._haltstep(step, index)
self.set('flowgraph', flow, step, index, 'select', sel_inputs)
##################
# 8. Copy (link) output data from previous steps
if step == 'import':
self._collect(step, index)
if not self.get('flowgraph', flow, step, index,'input'):
all_inputs = []
elif not self.get('flowgraph', flow, step, index, 'select'):
all_inputs = self.get('flowgraph', flow, step, index,'input')
else:
all_inputs = self.get('flowgraph', flow, step, index, 'select')
for in_step, in_index in all_inputs:
if self.get('flowgraph', flow, in_step, in_index, 'status') == TaskStatus.ERROR:
self.logger.error(f'Halting step due to previous error in {in_step}{in_index}')
self._haltstep(step, index)
# Skip copying pkg.json files here, since we write the current chip
# configuration into inputs/{design}.pkg.json earlier in _runtask.
utils.copytree(f"../../../{in_job}/{in_step}/{in_index}/outputs", 'inputs/', dirs_exist_ok=True,
ignore=[f'{design}.pkg.json'], link=True)
##################
# 9. Check manifest
self.set('arg', 'step', step, clobber=True)
self.set('arg', 'index', index, clobber=True)
if not self.get('option', 'skipcheck'):
if self.check_manifest():
self.logger.error(f"Fatal error in check_manifest()! See previous errors.")
self._haltstep(step, index)
##################
# 10. Run preprocess step for tool
if tool not in self.builtin:
func = self.find_function(tool, "pre_process", 'tools')
if func:
func(self)
if self.error:
self.logger.error(f"Pre-processing failed for '{tool}'")
self._haltstep(step, index)
##################
# 11. Set environment variables
# License file configuration.
for item in self.getkeys('tool', tool, 'licenseserver'):
license_file = self.get('tool', tool, 'licenseserver', item)
if license_file:
os.environ[item] = ':'.join(license_file)
# Tool-specific environment variables for this task.
if (step in self.getkeys('tool', tool, 'env')) and \
(index in self.getkeys('tool', tool, 'env', step)):
for item in self.getkeys('tool', tool, 'env', step, index):
os.environ[item] = self.get('tool', tool, 'env', step, index, item)
##################
# 12. Check exe version
vercheck = not self.get('option', 'novercheck')
veropt = self.get('tool', tool, 'vswitch')
exe = self._getexe(tool)
version = None
toolpath = exe # For record
if exe is not None:
exe_path, exe_base = os.path.split(exe)
if veropt:
cmdlist = [exe]
cmdlist.extend(veropt)
proc = subprocess.run(cmdlist, stdout=PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
parse_version = self.find_function(tool, 'parse_version', 'tools')
if parse_version is None:
self.logger.error(f'{tool} does not implement parse_version.')
self._haltstep(step, index)
version = parse_version(proc.stdout)
self.logger.info(f"Tool '{exe_base}' found with version '{version}' in directory '{exe_path}'")
if vercheck and not self._check_version(version, tool):
self._haltstep(step, index)
else:
self.logger.info(f"Tool '{exe_base}' found in directory '{exe_path}'")
elif tool not in self.builtin:
exe_base = self.get('tool', tool, 'exe')
self.logger.error(f'Executable {exe_base} not found')
self._haltstep(step, index)
##################
# 13. Write manifest (tool interface) (Don't move this!)
suffix = self.get('tool', tool, 'format')
if suffix:
pruneopt = bool(suffix!='tcl')
self.write_manifest(f"sc_manifest.{suffix}", prune=pruneopt, abspath=True)
##################
# 14. Start CPU Timer
self.logger.debug(f"Starting executable")
cpu_start = time.time()
##################
# 15. Run executable (or copy inputs to outputs for builtin functions)
# TODO: Currently no memory usage tracking in breakpoints, builtins, or unexpected errors.
max_mem_bytes = 0
if tool in self.builtin:
utils.copytree(f"inputs", 'outputs', dirs_exist_ok=True, link=True)
elif not self.get('option', 'skipall'):
cmdlist = self._makecmd(tool, step, index)
exe_base = os.path.basename(cmdlist[0])
cmdstr = ' '.join([exe_base] + cmdlist[1:])
self.logger.info('Running in %s', workdir)
self.logger.info('%s', cmdstr)
timeout = self.get('flowgraph', flow, step, index, 'timeout')
logfile = step + '.log'
if sys.platform in ('darwin', 'linux') and step in self.get('option', 'bkpt'):
# When we break on a step, the tool often drops into a shell.
# However, our usual subprocess scheme seems to break terminal
# echo for some tools. On POSIX-compatible systems, we can use
# pty to connect the tool to our terminal instead. This code
# doesn't handle quiet/timeout logic, since we don't want either
# of these features for an interactive session. Logic for
# forwarding to file based on
# https://docs.python.org/3/library/pty.html#example.
logfile = step + '.log'
with open(logfile, 'wb') as log_writer:
def read(fd):
data = os.read(fd, 1024)
log_writer.write(data)
return data
import pty # Note: this import throws exception on Windows
retcode = pty.spawn(cmdlist, read)
else:
stdout_file = ''
stdout_suffix = self.get('tool', tool, 'stdout', step, index, 'suffix')
if self.get('tool', tool, 'stdout', step, index, 'destination') == 'log':
stdout_file = step + "." + stdout_suffix
elif self.get('tool', tool, 'stdout', step, index, 'destination') == 'output':
stdout_file = os.path.join('outputs', self.get('design')) + "." + stdout_suffix
elif self.get('tool', tool, 'stdout', step, index, 'destination') == 'none':
stdout_file = os.devnull
else:
destination = self.get('tool', tool, 'stdout', step, index, 'destination')
self.logger.error(f'stdout/destination has no support for {destination}. Use [log|output|none].')
self._haltstep(step, index)
stderr_file = ''
stderr_suffix = self.get('tool', tool, 'stderr', step, index, 'suffix')
if self.get('tool', tool, 'stderr', step, index, 'destination') == 'log':
stderr_file = step + "." + stderr_suffix
elif self.get('tool', tool, 'stderr', step, index, 'destination') == 'output':
stderr_file = os.path.join('outputs', self.get('design')) + "." + stderr_suffix
elif self.get('tool', tool, 'stderr', step, index, 'destination') == 'none':
stderr_file = os.devnull
else:
destination = self.get('tool', tool, 'stderr', step, index, 'destination')
self.logger.error(f'stderr/destination has no support for {destination}. Use [log|output|none].')
self._haltstep(step, index)
with open(stdout_file, 'w') as stdout_writer, open(stdout_file, 'r') as stdout_reader, open(stderr_file, 'w') as stderr_writer, open(stderr_file, 'r') as stderr_reader:
# Use separate reader/writer file objects as hack to display
# live output in non-blocking way, so we can monitor the
# timeout. Based on https://stackoverflow.com/a/18422264.
is_stdout_log = self.get('tool', tool, 'stdout', step, index, 'destination') == 'log'
is_stderr_log = self.get('tool', tool, 'stderr', step, index, 'destination') == 'log' and stderr_file != stdout_file
# if STDOUT and STDERR are to be redirected to the same file,
# use a single writer
if stderr_file == stdout_file:
stderr_writer.close()
stderr_reader.close()
stderr_writer = subprocess.STDOUT
cmd_start_time = time.time()
proc = subprocess.Popen(cmdlist,
stdout=stdout_writer,
stderr=stderr_writer)
while proc.poll() is None:
# Gather subprocess memory usage.
try:
pproc = psutil.Process(proc.pid)
max_mem_bytes = max(max_mem_bytes, pproc.memory_full_info().uss)
except psutil.Error:
# Process may have already terminated or been killed.
# Retain existing memory usage statistics in this case.
pass
# Loop until process terminates
if not quiet:
if is_stdout_log:
sys.stdout.write(stdout_reader.read())
if is_stderr_log:
sys.stdout.write(stderr_reader.read())
if timeout is not None and time.time() - cmd_start_time > timeout:
self.logger.error(f'Step timed out after {timeout} seconds')
proc.terminate()
self._haltstep(step, index)
time.sleep(0.1)
# Read the remaining
if not quiet:
if is_stdout_log:
sys.stdout.write(stdout_reader.read())
if is_stderr_log:
sys.stdout.write(stderr_reader.read())
retcode = proc.returncode
if retcode != 0:
self.logger.warning('Command failed with code %d. See log file %s', retcode, os.path.abspath(logfile))
if not self.get('tool', tool, 'continue'):
self._haltstep(step, index)
##################
# 16. Capture cpu runtime and memory footprint.
cpu_end = time.time()
cputime = round((cpu_end - cpu_start),2)
self.set('metric', step, index, 'exetime', cputime)
self.set('metric', step, index, 'memory', max_mem_bytes)
##################
# 17. Post process (could fail)
post_error = 0
if (tool not in self.builtin) and (not self.get('option', 'skipall')) :
func = self.find_function(tool, 'post_process', 'tools')
if func:
post_error = func(self)
if post_error:
self.logger.error('Post-processing check failed')
if not self.get('tool', tool, 'continue'):
self._haltstep(step, index)
##################
# 18. Check log file (must be after post-process)
if (tool not in self.builtin) and (not self.get('option', 'skipall')) :
self.check_logfile(step=step, index=index, display=not quiet)
##################
# 19. Hash files
if self.get('option', 'hash') and (tool not in self.builtin):
# hash all outputs
self.hash_files('tool', tool, 'output', step, index)
# hash all requirements
if self.valid('tool', tool, 'require', step, index, quiet=True):
for item in self.get('tool', tool, 'require', step, index):
args = item.split(',')
if 'file' in self.get(*args, field='type'):
self.hash_files(*args)
##################
# 20. Capture wall runtime
wall_end = time.time()
walltime = round((wall_end - wall_start),2)
self.set('metric',step, index, 'tasktime', walltime)
self.logger.info(f"Finished task in {walltime}s")
##################
# 21. Make a record if tracking is enabled
if self.get('option', 'track'):
self._make_record(step, index, wall_start, wall_end, version, toolpath, cmdlist[1:])
##################
# 22. Save a successful manifest
self.set('flowgraph', flow, step, index, 'status', TaskStatus.SUCCESS)
self.set('arg', 'step', None, clobber=True)
self.set('arg', 'index', None, clobber=True)
self.write_manifest(os.path.join("outputs", f"{design}.pkg.json"))
##################
# 23. Stop if there are errors
if self.get('metric',step, index, 'errors') > 0:
if not self.get('tool', tool, 'continue'):
self._haltstep(step, index)
##################
# 24. Clean up non-essential files
if self.get('option', 'clean'):
self._eda_clean(tool, step, index)
##################
# 25. return to original directory
os.chdir(cwd)
###########################################################################
def _haltstep(self, step, index, log=True):
if log:
self.logger.error(f"Halting step '{step}' index '{index}' due to errors.")
sys.exit(1)
###########################################################################
def _eda_clean(self, tool, step, index):
'''Cleans up work directory of unnecessary files.
Assumes our cwd is the workdir for step and index.
'''
keep = ['inputs', 'outputs', 'reports', f'{step}.log', 'replay.sh']
manifest_format = self.get('tool', tool, 'format')
if manifest_format:
keep.append(f'sc_manifest.{manifest_format}')
for suffix in self.getkeys('tool', tool, 'regex', step, index):
keep.append(f'{step}.{suffix}')
# Tool-specific keep files
if self.valid('tool', tool, 'keep', step, index):
keep.extend(self.get('tool', tool, 'keep', step, index))
for path in os.listdir():
if path in keep:
continue
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
###########################################################################
def run(self):
'''
Executes tasks in a flowgraph.
The run function sets up tools and launches runs for every index
in a step defined by a steplist. The steplist is taken from the schema
steplist parameter if defined, otherwise the steplist is defined
as the list of steps within the schema flowgraph dictionary. Before
starting the process, tool modules are loaded and set up for each
step and index based on the schema eda dictionary settings.
Once the tools have been set up, the manifest is checked using the
check_manifest() function and files in the manifest are hashed based
on the 'hashmode' schema setting.
Once launched, each process waits for preceding steps to complete,
as defined by the flowgraph 'inputs' parameter. Once all inputs
are ready, previous steps are checked for errors before the
process enters a local working directory and starts to run
a tool or to execute a built-in Chip function.
Fatal errors within a step/index process cause all subsequent
processes to exit before starting, returning control to the main
program which can then exit.
Examples:
>>> run()
Runs the execution flow defined by the flowgraph dictionary.
'''
flow = self.get('option', 'flow')
# Re-init logger to include run info after setting up flowgraph.
self._init_logger(in_run=True)
# Run steps if set, otherwise run whole graph
if self.get('arg', 'step'):
steplist = [self.get('arg', 'step')]
elif self.get('option', 'steplist'):
steplist = self.get('option', 'steplist')
else:
steplist = self.list_steps()
if not self.get('option', 'resume'):
# If no step(list) was specified, the whole flow is being run
# start-to-finish. Delete the build dir to clear stale results.
cur_job_dir = self._getworkdir()
if os.path.isdir(cur_job_dir):
shutil.rmtree(cur_job_dir)
# List of indices to run per step. Precomputing this ensures we won't
# have any problems if [arg, index] gets clobbered, and reduces logic
# repetition.
indexlist = {}
for step in steplist:
if self.get('arg', 'index'):
indexlist[step] = [self.get('arg', 'index')]
elif self.get('option', 'indexlist'):
indexlist[step] = self.get("option", 'indexlist')
else:
indexlist[step] = self.getkeys('flowgraph', flow, step)
# Reset flowgraph/records/metrics by probing build directory. We need
# to set values to None for steps we may re-run so that merging
# manifests from _runtask() actually updates values.
should_resume = self.get("option", 'resume')
for step in self.getkeys('flowgraph', flow):
all_indices_failed = True
for index in self.getkeys('flowgraph', flow, step):
stepdir = self._getworkdir(step=step, index=index)
cfg = f"{stepdir}/outputs/{self.get('design')}.pkg.json"
in_steplist = step in steplist and index in indexlist[step]
if not os.path.isdir(stepdir) or (in_steplist and not should_resume):
# If stepdir doesn't exist, we need to re-run this task. If
# we're not running with -resume, we also re-run anything
# in the steplist.
self.set('flowgraph', flow, step, index, 'status', None)
for metric in self.getkeys('metric', 'default', 'default'):
self.set('metric', step, index, metric, None)
for record in self.getkeys('record', 'default', 'default'):
self.set('record', step, index, record, None)
elif os.path.isfile(cfg):
self.set('flowgraph', flow, step, index, 'status', TaskStatus.SUCCESS)
all_indices_failed = False
else:
self.set('flowgraph', flow, step, index, 'status', TaskStatus.ERROR)
if should_resume and all_indices_failed and step in steplist:
# When running with -resume, we re-run any step in steplist that
# had all indices fail.
for index in self.getkeys('flowgraph', flow, step):
if index in indexlist[step]:
self.set('flowgraph', flow, step, index, 'status', None)
for metric in self.getkeys('metric', 'default', 'default'):
self.set('metric', step, index, metric, None)
for record in self.getkeys('record', 'default', 'default'):
self.set('record', step, index, record, None)
# Set env variables
for envvar in self.getkeys('option', 'env'):
val = self.get('option', 'env', envvar)
os.environ[envvar] = val
# Remote workflow: Dispatch the Chip to a remote server for processing.
if self.get('option','remote'):
# Load the remote storage config into the status dictionary.
if self.get('option','credentials'):
# Use the provided remote credentials file.
cfg_file = self.get('option','credentials')[-1]
cfg_dir = os.path.dirname(cfg_file)
else:
# Use the default config file path.
cfg_dir = os.path.join(Path.home(), '.sc')
cfg_file = os.path.join(cfg_dir, 'credentials')
if (not os.path.isdir(cfg_dir)) or (not os.path.isfile(cfg_file)):
self.logger.error('Could not find remote server configuration - please run "sc-configure" and enter your server address and credentials.')
raise SiliconCompilerError('Valid remote credentials could not be found.')
with open(cfg_file, 'r') as cfgf:
self.status['remote_cfg'] = json.loads(cfgf.read())
if 'address' not in self.status['remote_cfg']:
self.logger.error('Improperly formatted remote server configuration - please run "sc-configure" and enter your server address and credentials.')
raise SiliconCompilerError('Valid remote credentials could not be found.')
# Pre-process: Run an 'import' stage locally, and upload the
# in-progress build directory to the remote server.
# Data is encrypted if user / key were specified.
# run remote process
remote_preprocess(self)
# Run the job on the remote server, and wait for it to finish.
remote_run(self)
# Fetch results (and delete the job's data from the server).
fetch_results(self)
# Read back configuration from final manifest.
cfg = os.path.join(self._getworkdir(),f"{self.get('design')}.pkg.json")
if os.path.isfile(cfg):
local_dir = self.get('option','builddir')
self.read_manifest(cfg, clobber=True, clear=True)
self.set('option', 'builddir', local_dir)
else:
# Hack to find first failed step by checking for presence of
# output manifests.
# TODO: fetch_results() should return info about step failures.
failed_step = steplist[-1]
for step in steplist[:-1]:
step_has_cfg = False
for index in indexlist[step]:
stepdir = self._getworkdir(step=step, index=index)
cfg = f"{stepdir}/outputs/{self.get('design')}.pkg.json"
if os.path.isfile(cfg):
step_has_cfg = True
break
if not step_has_cfg:
failed_step = step
break
stepdir = self._getworkdir(step=failed_step)[:-1]
raise SiliconCompilerError(f'Run() failed on step {failed_step}! '
f'See logs in {stepdir} for error details.')
else:
status = {}
# Populate status dict with any flowgraph status values that have already
# been set.
for step in self.getkeys('flowgraph', flow):
for index in self.getkeys('flowgraph', flow, step):
stepstr = step + index
task_status = self.get('flowgraph', flow, step, index, 'status')
if task_status is not None:
status[step + index] = task_status
else:
status[step + index] = TaskStatus.PENDING
# Setup tools for all tasks to run.
for step in steplist:
for index in indexlist[step]:
# Setting up tool is optional
tool = self.get('flowgraph', flow, step, index, 'tool')
if tool not in self.builtin:
self.set('arg','step', step)
self.set('arg','index', index)
func = self.find_function(tool, 'setup', 'tools')
if func is None:
self.logger.error(f'setup() not found for tool {tool}')
sys.exit(1)
func(self)
# Need to clear index, otherwise we will skip
# setting up other indices. Clear step for good
# measure.
self.set('arg','step', None)
self.set('arg','index', None)
# Implement auto-update of jobincrement
try:
alljobs = os.listdir(self.get('option','builddir') + "/" + self.get('design'))
if self.get('option','jobincr'):
jobid = 0
for item in alljobs:
m = re.match(self.get('option','jobname')+r'(\d+)', item)
if m:
jobid = max(jobid, int(m.group(1)))
self.set('option', 'jobid', str(jobid + 1))
except:
pass
# Check validity of setup
self.logger.info("Checking manifest before running.")
if not self.get('option','skipcheck'):
self.check_manifest()
# Check if there were errors before proceeding with run
if self.error:
self.logger.error(f"Check failed. See previous errors.")
raise SiliconCompilerError(f"Manifest checks failed.")
# For each task to run, prepare a process and store its dependencies
jobname = self.get('option','jobname')
tasks_to_run = {}
processes = {}
for step in steplist:
for index in indexlist[step]:
if status[step+index] != TaskStatus.PENDING:
continue
inputs = [step+index for step, index in self.get('flowgraph', flow, step, index, 'input')]
if (step in self.getkeys('option','jobinput') and
index in self.getkeys('option','jobinput', step) and
self.get('option','jobinput', step, index) != jobname):
# If we specify a different job as input to this task,
# we assume we are good to run it.
tasks_to_run[step+index] = []
else:
tasks_to_run[step+index] = inputs
processes[step+index] = multiprocessing.Process(target=self._runtask,
args=(step, index, status))
# We have to deinit the chip's logger before spawning the processes
# since the logger object is not serializable. _runtask_safe will
# reinitialize the logger in each new process, and we reinitialize
# the primary chip's logger after the processes complete.
self._deinit_logger()
running_tasks = []
while len(tasks_to_run) > 0 or len(running_tasks) > 0:
# Check for new tasks that can be launched.
for task, deps in list(tasks_to_run.items()):
# TODO: breakpoint logic:
# if task is bkpt, then don't launch while len(running_tasks) > 0
# Clear any tasks that have finished from dependency list.
for in_task in deps.copy():
if status[in_task] != TaskStatus.PENDING:
deps.remove(in_task)
# If there are no dependencies left, launch this task and
# remove from tasks_to_run.
if len(deps) == 0:
processes[task].start()
running_tasks.append(task)
del tasks_to_run[task]
# Check for situation where we have stuff left to run but don't
# have any tasks running. This shouldn't happen, but we will get
# stuck in an infinite loop if it does, so we want to break out
# with an explicit error.
if len(tasks_to_run) > 0 and len(running_tasks) == 0:
raise SiliconCompilerError('Tasks left to run, but no '
'running tasks. Steplist may be invalid.')
# Check for completed tasks.
# TODO: consider staying in this section of loop until a task
# actually completes.
for task in running_tasks.copy():
if not processes[task].is_alive():
running_tasks.remove(task)
if processes[task].exitcode > 0:
status[task] = TaskStatus.ERROR
else:
status[task] = TaskStatus.SUCCESS
# TODO: exponential back-off with max?
time.sleep(0.1)
self._init_logger()
# Make a clean exit if one of the steps failed
for step in steplist:
index_succeeded = False
for index in indexlist[step]:
stepstr = step + index
if status[stepstr] != TaskStatus.ERROR:
index_succeeded = True
break
if not index_succeeded:
raise SiliconCompilerError('Run() failed, see previous errors.')
# On success, write out status dict to flowgraph status'. We do this
# since certain scenarios won't be caught by reading in manifests (a
# failing step doesn't dump a manifest). For example, if the
# steplist's final step has two indices and one fails.
for step in steplist:
for index in indexlist[step]:
stepstr = step + index
if status[stepstr] != TaskStatus.PENDING:
self.set('flowgraph', flow, step, index, 'status', status[stepstr])
# Merge cfg back from last executed runsteps.
# Note: any information generated in steps that do not merge into the
# last step will not be picked up in this chip object.
# TODO: we might as well fix this? We can add a helper function to
# find all steps in the steplist that don't lead to others.
laststep = steplist[-1]
for index in indexlist[laststep]:
lastdir = self._getworkdir(step=laststep, index=index)
# This no-op listdir operation is important for ensuring we have
# a consistent view of the filesystem when dealing with NFS.
# Without this, this thread is often unable to find the final
# manifest of runs performed on job schedulers, even if they
# completed successfully. Inspired by:
# https://stackoverflow.com/a/70029046.
os.listdir(os.path.dirname(lastdir))
lastcfg = f"{lastdir}/outputs/{self.get('design')}.pkg.json"
if status[laststep+index] == TaskStatus.SUCCESS:
self._read_manifest(lastcfg, clobber=False, partial=True)
else:
self.set('flowgraph', flow, laststep, index, 'status', TaskStatus.ERROR)
# Clear scratchpad args since these are checked on run() entry
self.set('arg', 'step', None, clobber=True)
self.set('arg', 'index', None, clobber=True)
# Store run in history
self.record_history()
# Storing manifest in job root directory
filepath = os.path.join(self._getworkdir(),f"{self.get('design')}.pkg.json")
self.write_manifest(filepath)
##########################################################################
def record_history(self):
'''
Copies all non-empty parameters from current job into the history
dictionary.
'''
# initialize new dict
jobname = self.get('option','jobname')
self.cfg['history'][jobname] = {}
# copy in all empty values of scope job
allkeys = self.getkeys()
for key in allkeys:
# ignore history in case of cumulative history
if key[0] != 'history':
scope = self.get(*key, field='scope')
if not self._keypath_empty(key) and (scope == 'job'):
self._copyparam(self.cfg,
self.cfg['history'][jobname],
key)
###########################################################################
def _copyparam(self, cfgsrc, cfgdst, keypath):
'''
Copies a parameter into the manifest history dictionary.
'''
# 1. descend keypath, pop each key as it's used
# 2. create key if missing in destination dict
# 3. populate leaf cell when keypath empty
if keypath:
key = keypath[0]
keypath.pop(0)
if key not in cfgdst.keys():
cfgdst[key] = {}
self._copyparam(cfgsrc[key], cfgdst[key], keypath)
else:
for key in cfgsrc.keys():
if key not in ('example', 'switch', 'help'):
cfgdst[key] = copy.deepcopy(cfgsrc[key])
###########################################################################
def show(self, filename=None, extra_options=None):
'''
Opens a graphical viewer for the filename provided.
The show function opens the filename specified using a viewer tool
selected based on the file suffix and the 'showtool' schema setup.
The 'showtool' parameter binds tools with file suffixes, enabling the
automated dynamic loading of tool setup functions from
siliconcompiler.tools.<tool>/<tool>.py. Display settings and
technology settings for viewing the file are read from the
in-memory chip object schema settings. All temporary render and
display files are saved in the <build_dir>/_show directory.
The show() command can also be used to display content from an SC
schema .json filename provided. In this case, the SC schema is
converted to html and displayed as a 'dashboard' in the browser.
Filenames with .gz and .zip extensions are automatically unpacked
before being displayed.
Args:
filename: Name of file to display
Examples:
>>> show('build/oh_add/job0/export/0/outputs/oh_add.gds')
Displays gds file with a viewer assigned by 'showtool'
>>> show('build/oh_add/job0/export/0/outputs/oh_add.pkg.json')
Displays manifest in the browser
'''
if extra_options is None:
extra_options = []
# Finding last layout if no argument specified
if filename is None:
self.logger.info('Searching build directory for layout to show.')
design = self.get('design')
# TODO: consider a more flexible approach here. I tried doing a
# reverse search through all steps, but when verification steps are
# enabled this finds a DEF passed into LVS rather than the GDS
# Perhaps we could have a way for flows to register their "final"
# output.
laststep = 'export'
lastindex = '0'
lastdir = self._getworkdir(step=laststep, index=lastindex)
gds_file= f"{lastdir}/outputs/{design}.gds"
def_file = f"{lastdir}/outputs/{design}.def"
if os.path.isfile(gds_file):
filename = gds_file
elif os.path.isfile(def_file):
filename = def_file
if filename is None:
self.logger.error('Unable to automatically find layout in build directory.')
self.logger.error('Try passing in a full path to show() instead.')
return 1
self.logger.info('Showing file %s', filename)
# Parsing filepath
filepath = os.path.abspath(filename)
basename = os.path.basename(filepath)
localfile = basename.replace(".gz","")
filetype = os.path.splitext(localfile)[1].lower().replace(".","")
#Check that file exists
if not os.path.isfile(filepath):
self.logger.error(f"Invalid filepath {filepath}.")
return 1
# Opening file from temp directory
cwd = os.getcwd()
showdir = self.get('option','builddir') + "/_show"
os.makedirs(showdir, exist_ok=True)
os.chdir(showdir)
# Uncompress file if necessary
if os.path.splitext(filepath)[1].lower() == ".gz":
with gzip.open(filepath, 'rb') as f_in:
with open(localfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
shutil.copy(filepath, localfile)
#Figure out which tool to use for opening data
if filetype in self.getkeys('option','showtool'):
# Using env variable and manifest to pass arguments
os.environ['SC_FILENAME'] = localfile
# Setting up tool
tool = self.get('option','showtool', filetype)
step = 'show'+filetype
index = "0"
self.set('arg', 'step', step)
self.set('arg', 'index', index)
setup_tool = self.find_function(tool, 'setup', 'tools')
setup_tool(self, mode='show')
self.write_manifest("sc_manifest.tcl", abspath=True)
self.write_manifest("sc_manifest.json", abspath=True)
self.set('arg', 'step', None)
self.set('arg', 'index', None)
exe = self._getexe(tool)
if shutil.which(exe) is None:
self.logger.error(f'Executable {exe} not found.')
success = False
else:
# Running command
cmdlist = self._makecmd(tool, step, index, extra_options=extra_options)
proc = subprocess.run(cmdlist)
success = proc.returncode == 0
else:
self.logger.error(f"Filetype '{filetype}' not set up in 'showtool' parameter.")
success = False
# Returning to original directory
os.chdir(cwd)
return success
def read_lef(self, path, pdkname, stackup):
'''Reads tech LEF and imports data into schema.
This function reads layer information from a provided tech LEF and uses
it to fill out the 'pdk', <pdkname>, 'grid' keypaths of the current chip
object.
Args:
path (str): Path to LEF file.
pdkname (str): Name of PDK associated with LEF file.
stackup (str): Stackup associated with LEF file.
'''
data = leflib.parse(path)
layer_index = 1
for name, layer in data['layers'].items():
if layer['type'] != 'ROUTING':
# Skip non-routing layers
continue
sc_name = f'm{layer_index}'
layer_index += 1
self.set('pdk', pdkname, 'grid', stackup, name, 'name', sc_name)
direction = None
if 'direction' in layer:
direction = layer['direction'].lower()
self.set('pdk', pdkname, 'grid', stackup, name, 'dir', direction)
if 'offset' in layer:
offset = layer['offset']
if isinstance(offset, float):
# Per LEF spec, a single offset value applies to the
# preferred routing direction. If one doesn't exist, we'll
# just ignore.
if direction == 'vertical':
self.set('pdk', pdkname, 'grid', stackup, name, 'xoffset', offset)
elif direction == 'horizontal':
self.set('pdk', pdkname, 'grid', stackup, name, 'yoffset', offset)
else:
xoffset, yoffset = offset
self.set('pdk', pdkname, 'grid', stackup, name, 'xoffset', xoffset)
self.set('pdk', pdkname, 'grid', stackup, name, 'yoffset', yoffset)
if 'pitch' in layer:
pitch = layer['pitch']
if isinstance(pitch, float):
# Per LEF spec, a single pitch value applies to both
# directions.
self.set('pdk', pdkname, 'grid', stackup, name, 'xpitch', pitch)
self.set('pdk', pdkname, 'grid', stackup, name, 'ypitch', pitch)
else:
xpitch, ypitch = pitch
self.set('pdk', pdkname, 'grid', stackup, name, 'xpitch', xpitch)
self.set('pdk', pdkname, 'grid', stackup, name, 'ypitch', ypitch)
############################################################################
# Chip helper Functions
############################################################################
def _typecheck(self, cfg, leafkey, value):
''' Schema type checking
'''
ok = True
valuetype = type(value)
errormsg = ""
if (not re.match(r'\[', cfg['type'])) and (valuetype == list):
errormsg = "Value must be scalar."
ok = False
# Iterate over list
else:
# Create list for iteration
if valuetype == list:
valuelist = value
else:
valuelist = [value]
# Make type python compatible
cfgtype = re.sub(r'[\[\]]', '', cfg['type'])
for item in valuelist:
valuetype = type(item)
if ((cfgtype != valuetype.__name__) and (item is not None)):
tupletype = re.match(r'\([\w\,]+\)',cfgtype)
#TODO: check tuples!
if tupletype:
pass
elif cfgtype == 'bool':
if item not in ['true', 'false']:
errormsg = "Valid boolean values are True/False/'true'/'false'"
ok = False
elif cfgtype == 'file':
pass
elif cfgtype == 'dir':
pass
elif (cfgtype == 'float'):
try:
float(item)
except:
errormsg = "Type mismatch. Cannot cast item to float."
ok = False
elif (cfgtype == 'int'):
try:
int(item)
except:
errormsg = "Type mismatch. Cannot cast item to int."
ok = False
elif item is not None:
errormsg = "Type mismach."
ok = False
# Logger message
if type(value) == list:
printvalue = ','.join(map(str, value))
else:
printvalue = str(value)
errormsg = (errormsg +
" Key=" + str(leafkey) +
", Expected Type=" + cfg['type'] +
", Entered Type=" + valuetype.__name__ +
", Value=" + printvalue)
return (ok, errormsg)
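# Illustrative use (hypothetical keypaths/values): a scalar 'int' parameter
# given the string 'four' fails the cast check and returns ok=False with a
# "Type mismatch" message; a '[str]' parameter given ['./rtl'] returns ok=True.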
#######################################
def _getexe(self, tool):
path = self.get('tool', tool, 'path')
exe = self.get('tool', tool, 'exe')
if exe is None:
return None
syspath = os.getenv('PATH', os.defpath)
if path:
# Prepend 'path' schema var to system path
syspath = self._resolve_env_vars(path) + os.pathsep + syspath
fullexe = shutil.which(exe, path=syspath)
return fullexe
#######################################
def _makecmd(self, tool, step, index, extra_options=None):
'''
Constructs a subprocess run command based on eda tool setup.
Creates a replay script in current directory.
'''
fullexe = self._getexe(tool)
options = []
is_posix = (sys.platform != 'win32')
for option in self.get('tool', tool, 'option', step, index):
options.extend(shlex.split(option, posix=is_posix))
# Add scripts files
if self.valid('tool', tool, 'script', step, index):
scripts = self.find_files('tool', tool, 'script', step, index)
else:
scripts = []
cmdlist = [fullexe]
if extra_options:
cmdlist.extend(extra_options)
cmdlist.extend(options)
cmdlist.extend(scripts)
runtime_options = self.find_function(tool, 'runtime_options', 'tools')
if runtime_options:
for option in runtime_options(self):
cmdlist.extend(shlex.split(option, posix=is_posix))
envvars = {}
for key in self.getkeys('option','env'):
envvars[key] = self.get('option','env', key)
for item in self.getkeys('tool', tool, 'licenseserver'):
license_file = self.get('tool', tool, 'licenseserver', item)
if license_file:
envvars[item] = ':'.join(license_file)
if self.get('tool', tool, 'path'):
envvars['PATH'] = self.get('tool', tool, 'path') + os.pathsep + '$PATH'
if (step in self.getkeys('tool', tool, 'env') and
index in self.getkeys('tool', tool, 'env', step)):
for key in self.getkeys('tool', tool, 'env', step, index):
envvars[key] = self.get('tool', tool, 'env', step, index, key)
#create replay file
script_name = 'replay.sh'
with open(script_name, 'w') as f:
print('#!/bin/bash', file=f)
envvar_cmd = 'export'
for key, val in envvars.items():
print(f'{envvar_cmd} {key}={val}', file=f)
replay_cmdlist = [os.path.basename(cmdlist[0])] + cmdlist[1:]
print(' '.join(f'"{arg}"' if ' ' in arg else arg for arg in replay_cmdlist), file=f)
os.chmod(script_name, 0o755)
return cmdlist
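# The generated replay.sh is a sketch of the exact invocation, e.g.
# (illustrative tool name, path, and options):
#   #!/bin/bash
#   export PATH=/opt/tool/bin:$PATH
#   toolexe -batch sc_manifest.tcl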
#######################################
def _get_cloud_region(self):
# TODO: add logic to figure out if we're running on a remote cluster and
# extract the region in a provider-specific way.
return 'local'
#######################################
def _make_record(self, step, index, start, end, toolversion, toolpath, cli_args):
'''
Records provenance details for a runstep.
'''
self.set('record', step, index, 'scversion', _metadata.version)
start_date = datetime.datetime.fromtimestamp(start).strftime('%Y-%m-%d %H:%M:%S')
end_date = datetime.datetime.fromtimestamp(end).strftime('%Y-%m-%d %H:%M:%S')
userid = getpass.getuser()
self.set('record', step, index, 'userid', userid)
if toolversion:
self.set('record', step, index, 'toolversion', toolversion)
self.set('record', step, index, 'starttime', start_date)
self.set('record', step, index, 'endtime', end_date)
machine = platform.node()
self.set('record', step, index, 'machine', machine)
self.set('record', step, index, 'region', self._get_cloud_region())
try:
gateways = netifaces.gateways()
ipaddr, interface = gateways['default'][netifaces.AF_INET]
macaddr = netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr']
self.set('record', step, index, 'ipaddr', ipaddr)
self.set('record', step, index, 'macaddr', macaddr)
except KeyError:
self.logger.warning('Could not find default network interface info')
system = platform.system()
if system == 'Darwin':
lower_sys_name = 'macos'
else:
lower_sys_name = system.lower()
self.set('record', step, index, 'platform', lower_sys_name)
if system == 'Linux':
distro_name = distro.id()
self.set('record', step, index, 'distro', distro_name)
if system == 'Darwin':
osversion, _, _ = platform.mac_ver()
elif system == 'Linux':
osversion = distro.version()
else:
osversion = platform.release()
self.set('record', step, index, 'osversion', osversion)
if system == 'Linux':
kernelversion = platform.release()
elif system == 'Windows':
kernelversion = platform.version()
elif system == 'Darwin':
kernelversion = platform.release()
else:
kernelversion = None
if kernelversion:
self.set('record', step, index, 'kernelversion', kernelversion)
arch = platform.machine()
self.set('record', step, index, 'arch', arch)
self.set('record', step, index, 'toolpath', toolpath)
toolargs = ' '.join(f'"{arg}"' if ' ' in arg else arg for arg in cli_args)
self.set('record', step, index, 'toolargs', toolargs)
#######################################
def _safecompare(self, value, op, goal):
# supported relational operations
# >, >=, <=, <, ==, !=
if op == ">":
return(bool(value>goal))
elif op == ">=":
return(bool(value>=goal))
elif op == "<":
return(bool(value<goal))
elif op == "<=":
return(bool(value<=goal))
elif op == "==":
return(bool(value==goal))
elif op == "!=":
return(bool(value!=goal))
else:
self.error = 1
self.logger.error(f"Illegal comparison operation {op}")
#######################################
def _getworkdir(self, jobname=None, step=None, index='0'):
'''Return the absolute path of the work directory
for a given jobname, step, and index.
'''
if jobname is None:
jobname = self.get('option','jobname')
dirlist =[self.cwd,
self.get('option','builddir'),
self.get('design'),
jobname]
# Return jobdirectory if no step defined
# Return index 0 by default
if step is not None:
dirlist.append(step)
dirlist.append(index)
return os.path.join(*dirlist)
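# Resulting layout: <cwd>/<builddir>/<design>/<jobname>[/<step>/<index>],
# e.g. (illustrative names) build/mydesign/job0/syn/0 for step='syn', index='0'.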
#######################################
def _resolve_env_vars(self, filepath):
resolved_path = os.path.expandvars(filepath)
# variables that don't exist in environment get ignored by `expandvars`,
# but we can do our own error checking to ensure this doesn't result in
# silent bugs
envvars = re.findall(r'\$(\w+)', resolved_path)
for var in envvars:
self.logger.warning(f'Variable {var} in {filepath} not defined in environment')
return resolved_path
#######################################
def _get_imported_filename(self, pathstr):
''' Utility to map collected file to an unambiguous name based on its path.
The mapping looks like:
path/to/file.ext => file_<sha1('path/to/file.ext')>.ext
'''
path = pathlib.Path(pathstr)
ext = ''.join(path.suffixes)
# strip off all file suffixes to get just the bare name
while path.suffix:
path = pathlib.Path(path.stem)
filename = str(path)
pathhash = hashlib.sha1(pathstr.encode('utf-8')).hexdigest()
return f'{filename}_{pathhash}{ext}'
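# e.g. _get_imported_filename('rtl/top.v') yields 'top_<40-char sha1 hex>.v'
# (hash value omitted here); double suffixes like '.tar.gz' are preserved.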
def _check_version(self, reported_version, tool):
# Based on regex for deprecated "legacy specifier" from PyPA packaging
# library. Use this to parse PEP-440ish specifiers with arbitrary
# versions.
_regex_str = r"""
(?P<operator>(==|!=|<=|>=|<|>|~=))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
normalize_version = self.find_function(tool, 'normalize_version', 'tools')
# Version is good if it matches any of the specifier sets in this list.
spec_sets = self.get('tool', tool, 'version')
for spec_set in spec_sets:
split_specs = [s.strip() for s in spec_set.split(",") if s.strip()]
specs_list = []
for spec in split_specs:
match = re.match(_regex, spec)
if match is None:
self.logger.warning(f'Invalid version specifier {spec}. Defaulting to =={spec}.')
operator = '=='
spec_version = spec
else:
operator = match.group('operator')
spec_version = match.group('version')
specs_list.append((operator, spec_version))
if normalize_version is None:
normalized_version = reported_version
normalized_specs = ','.join([f'{op}{ver}' for op, ver in specs_list])
else:
normalized_version = normalize_version(reported_version)
normalized_specs = ','.join([f'{op}{normalize_version(ver)}' for op, ver in specs_list])
try:
version = packaging.version.Version(normalized_version)
except packaging.version.InvalidVersion:
self.logger.error(f'Version {reported_version} reported by {tool} does not match standard.')
if normalize_version is None:
self.logger.error('Tool driver should implement normalize_version().')
else:
self.logger.error(f'normalize_version() returned invalid version {normalized_version}')
return False
try:
spec_set = packaging.specifiers.SpecifierSet(normalized_specs)
except packaging.specifiers.InvalidSpecifier:
self.logger.error(f'Version specifier set {normalized_specs} does not match standard.')
return False
if version in spec_set:
return True
allowedstr = '; '.join(spec_sets)
self.logger.error(f"Version check failed for {tool}. Check installation.")
self.logger.error(f"Found version {reported_version}, did not satisfy any version specifier set {allowedstr}.")
return False
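# The check passes if the reported version satisfies any one spec set in the
# list, e.g. (illustrative) ['>=1.0,<2.0', '==0.9.1'] accepts 1.3 or 0.9.1.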
###############################################################################
# Package Customization classes
###############################################################################
class YamlIndentDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super(YamlIndentDumper, self).increase_indent(flow, False)
class SiliconCompilerError(Exception):
''' Minimal Exception wrapper used to raise sc runtime errors.
'''
def __init__(self, message):
super(Exception, self).__init__(message)
|
uchan/lib/utils.py | alanbato/tchan | 120 | 11121407 | <filename>uchan/lib/utils.py
import time
from werkzeug.exceptions import abort
def now():
return int(time.time() * 1000)
def ip4_to_str(ip4):
outputs = []
for i in range(4):
n = (ip4 >> (3 - i) * 8) & 255
outputs.append(str(n))
return '.'.join(outputs)
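# e.g. ip4_to_str(0x7F000001) -> '127.0.0.1'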
def valid_id_range(id):
if type(id) != int or id <= 0 or id > 2 ** 32:
abort(400)
def get_cookie_domain(app):
"""Helpful helper method that returns the cookie domain that should
be used for the session cookie if session cookies are used.
"""
if app.config['SESSION_COOKIE_DOMAIN'] is not None:
return app.config['SESSION_COOKIE_DOMAIN']
if app.config['SERVER_NAME'] is not None:
# chop of the port which is usually not supported by browsers
rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
# Google chrome does not like cookies set to .localhost, so
# we just go with no domain then. Flask documents anyways that
# cross domain cookies need a fully qualified domain name
if rv == '.localhost':
rv = None
# If we infer the cookie domain from the server name we need
# to check if we are in a subpath. In that case we can't
# set a cross domain cookie.
if rv is not None:
# Returns the path for which the cookie should be valid. The
# default implementation uses the value from the ``SESSION_COOKIE_PATH``
# config var if it's set, and falls back to ``APPLICATION_ROOT`` or
# uses ``/`` if it's ``None``.
path = app.config['SESSION_COOKIE_PATH'] or app.config['APPLICATION_ROOT'] or '/'
if path != '/':
rv = rv.lstrip('.')
return rv
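# e.g. with SERVER_NAME 'example.com:8080' and no SESSION_COOKIE_DOMAIN set,
# this returns '.example.com'; with SERVER_NAME 'localhost' it returns None.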
|
paddlespeech/t2s/modules/residual_block.py | jerryuhoo/PaddleSpeech | 1,379 | 11121421 | <reponame>jerryuhoo/PaddleSpeech
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any
from typing import Dict
from typing import List
import paddle
from paddle import nn
from paddle.nn import functional as F
from paddlespeech.t2s.modules.activation import get_activation
class WaveNetResidualBlock(nn.Layer):
"""A gated activation unit composed of an 1D convolution, a gated tanh
unit and parametric redidual and skip connections. For more details,
refer to `WaveNet: A Generative Model for Raw Audio <https://arxiv.org/abs/1609.03499>`_.
Args:
kernel_size (int, optional): Kernel size of the 1D convolution, by default 3
residual_channels (int, optional): Feature size of the residual output(and also the input), by default 64
gate_channels (int, optional): Output feature size of the 1D convolution, by default 128
skip_channels (int, optional): Feature size of the skip output, by default 64
aux_channels (int, optional): Feature size of the auxiliary input (e.g. spectrogram), by default 80
dropout (float, optional): Probability of the dropout before the 1D convolution, by default 0.
dilation (int, optional): Dilation of the 1D convolution, by default 1
bias (bool, optional): Whether to use bias in the 1D convolution, by default True
use_causal_conv (bool, optional): Whether to use causal padding for the 1D convolution, by default False
"""
def __init__(self,
kernel_size: int=3,
residual_channels: int=64,
gate_channels: int=128,
skip_channels: int=64,
aux_channels: int=80,
dropout: float=0.,
dilation: int=1,
bias: bool=True,
use_causal_conv: bool=False):
super().__init__()
self.dropout = dropout
if use_causal_conv:
padding = (kernel_size - 1) * dilation
else:
assert kernel_size % 2 == 1
padding = (kernel_size - 1) // 2 * dilation
self.use_causal_conv = use_causal_conv
self.conv = nn.Conv1D(
residual_channels,
gate_channels,
kernel_size,
padding=padding,
dilation=dilation,
bias_attr=bias)
if aux_channels is not None:
self.conv1x1_aux = nn.Conv1D(
aux_channels, gate_channels, kernel_size=1, bias_attr=False)
else:
self.conv1x1_aux = None
gate_out_channels = gate_channels // 2
self.conv1x1_out = nn.Conv1D(
gate_out_channels, residual_channels, kernel_size=1, bias_attr=bias)
self.conv1x1_skip = nn.Conv1D(
gate_out_channels, skip_channels, kernel_size=1, bias_attr=bias)
def forward(self, x, c):
"""
Args:
x (Tensor): the input features. Shape (N, C_res, T)
c (Tensor): the auxiliary input. Shape (N, C_aux, T)
Returns:
res (Tensor): Shape (N, C_res, T), the residual output, which is used as the
input of the next ResidualBlock in a stack of ResidualBlocks.
skip (Tensor): Shape (N, C_skip, T), the skip output, which is collected among
each layer in a stack of ResidualBlocks.
"""
x_input = x
x = F.dropout(x, self.dropout, training=self.training)
x = self.conv(x)
x = x[:, :, :x_input.shape[-1]] if self.use_causal_conv else x
if c is not None:
c = self.conv1x1_aux(c)
x += c
a, b = paddle.chunk(x, 2, axis=1)
x = paddle.tanh(a) * F.sigmoid(b)
skip = self.conv1x1_skip(x)
res = (self.conv1x1_out(x) + x_input) * math.sqrt(0.5)
return res, skip
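# Minimal usage sketch (shapes follow the docstrings above; values illustrative):
#   block = WaveNetResidualBlock(residual_channels=64, aux_channels=80)
#   x = paddle.randn([4, 64, 100])   # (N, C_res, T)
#   c = paddle.randn([4, 80, 100])   # (N, C_aux, T)
#   res, skip = block(x, c)          # res: (4, 64, 100), skip: (4, 64, 100)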
class HiFiGANResidualBlock(nn.Layer):
"""Residual block module in HiFiGAN."""
def __init__(
self,
kernel_size: int=3,
channels: int=512,
dilations: List[int]=(1, 3, 5),
bias: bool=True,
use_additional_convs: bool=True,
nonlinear_activation: str="leakyrelu",
nonlinear_activation_params: Dict[str, Any]={"negative_slope": 0.1},
):
"""Initialize HiFiGANResidualBlock module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
channels (int): Number of channels for convolution layer.
dilations (List[int]): List of dilation factors.
use_additional_convs (bool): Whether to use additional convolution layers.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
"""
super().__init__()
self.use_additional_convs = use_additional_convs
self.convs1 = nn.LayerList()
if use_additional_convs:
self.convs2 = nn.LayerList()
assert kernel_size % 2 == 1, "Kernel size must be odd number."
for dilation in dilations:
self.convs1.append(
nn.Sequential(
get_activation(nonlinear_activation, **
nonlinear_activation_params),
nn.Conv1D(
channels,
channels,
kernel_size,
1,
dilation=dilation,
bias_attr=bias,
padding=(kernel_size - 1) // 2 * dilation, ), ))
if use_additional_convs:
self.convs2.append(
nn.Sequential(
get_activation(nonlinear_activation, **
nonlinear_activation_params),
nn.Conv1D(
channels,
channels,
kernel_size,
1,
dilation=1,
bias_attr=bias,
padding=(kernel_size - 1) // 2, ), ))
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, channels, T).
Returns:
Tensor: Output tensor (B, channels, T).
"""
for idx in range(len(self.convs1)):
xt = self.convs1[idx](x)
if self.use_additional_convs:
xt = self.convs2[idx](xt)
x = xt + x
return x
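# Minimal usage sketch (values illustrative):
#   block = HiFiGANResidualBlock(kernel_size=3, channels=512, dilations=(1, 3, 5))
#   y = block(paddle.randn([2, 512, 100]))   # -> (2, 512, 100)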
|
cluster/example.py | SunGuo/500lines | 134 | 11121436 | <filename>cluster/example.py
import sys
import logging
from fleet import Ship
def key_value_state_machine(state, input_value):
print input_value, state
if input_value[0] == 'get':
return state, state.get(input_value[1], None)
elif input_value[0] == 'set':
state[input_value[1]] = input_value[2]
return state, input_value[2]
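# e.g. key_value_state_machine({}, ('set', 'x', 1)) -> ({'x': 1}, 1)
#      key_value_state_machine({'x': 1}, ('get', 'x')) -> ({'x': 1}, 1)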
def main():
logging.basicConfig(format="%(asctime)s - %(name)s - %(message)s", level=logging.WARNING)
if sys.argv[1] == '--seed':
sys.argv.pop(1)
seed = {}
else:
seed = None
ship = Ship(state_machine=key_value_state_machine,
port=int(sys.argv[1]), peers=['127.0.0.1-%s' % p for p in sys.argv[2:]],
seed=seed)
ship.start()
for event in ship.events():
print event
old = ship.invoke(('get', sys.argv[1])) or 0
print "got", old
ship.invoke(('set', sys.argv[1], old + 1))
if __name__ == "__main__":
main()
|
bibliopixel/util/platform.py | rec/leds | 253 | 11121437 | import platform, subprocess
MAC = 'Darwin'
WINDOWS = 'Windows'
CPUINFO_FILE = '/proc/cpuinfo'
class Platform:
def __init__(self):
self.platform = platform.system()
self.version = platform.version()
self.release = platform.release()
self.python_version = platform.python_version()
try:
self.cpuinfo = [i.strip() for i in open(CPUINFO_FILE)]
except:
self.cpuinfo = []
def is_rpi_line(i):
return i.startswith('Hardware') and i.endswith('BCM2708')
self.is_raspberry_pi = any(is_rpi_line(i) for i in self.cpuinfo)
self.is_linux = (self.platform == 'Linux')
platform_version = ()
if self.is_linux:
# Use the linux distribution as the name
self.platform = platform.linux_distribution()[0].lower()
elif self.platform == WINDOWS:
platform_version = platform.win32_ver()
elif self.platform == MAC:
release, versioninfo, machine = platform.mac_ver()
platform_version = release, machine
# https://boklee.blogspot.com/2012/05/how-to-retrieve-cpuinfo-on-os-x.html
for i in 'features', 'brand_string':
s = subprocess.check_output(('sysctl', 'machdep.cpu.' + i))
self.cpuinfo.append(s.decode().strip())
self.platform_version = ':'.join(platform_version)
|
custom_components/trakt/const.py | ProConvenience1/sensor.trakt | 288 | 11121442 | <gh_stars>100-1000
"""Constants used in the Trakt integration."""
DOMAIN = "trakt"
OAUTH2_AUTHORIZE = "https://api-v2launch.trakt.tv/oauth/authorize"
OAUTH2_TOKEN = "https://api-v2launch.trakt.tv/oauth/token"
ATTRIBUTION = "Data provided by trakt.tv"
CONF_DAYS = "days"
CONF_EXCLUDE = "exclude"
DATA_UPDATED = "trakt_data_updated"
DEFAULT_DAYS = 30
DEFAULT_SCAN_INTERVAL = 60
DEFAULT_NAME = "Trakt Upcoming Calendar"
CARD_DEFAULT = {
"title_default": "$title",
"line1_default": "$episode",
"line2_default": "$release",
"line3_default": "$rating - $runtime",
"line4_default": "$number - $studio",
"icon": "mdi:arrow-down-bold",
}
|
recipes/Python/577485_Self_Extracting_Archiver/recipe-577485.py | tdiprima/code | 2,023 | 11121451 | """Command-line tool for making self-extracting Python file.
Call this program from your command line with one argument:
(1) the file that you want to pack and compress
(2) the output will be a file with a pyw ending
The output can run on Windows where Python is installed."""
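# Example (illustrative filenames): `python recipe-577485.py notes.txt` writes a
# self-extracting `notes.pyw` next to it, using whichever of bz2/zlib/plain base64
# packs the payload smallest.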
################################################################################
import sys
import os.path
import bz2
import zlib
import base64
################################################################################
def main():
"Extract the command-line arguments and run the packer."
try:
pack(sys.argv[1])
except (IndexError, AssertionError):
print('Usage: {} <filename>'.format(os.path.basename(sys.argv[0])))
def pack(path):
"Get the source, compress it, and create a packed file."
data = read_file(path)
builder, data = optimize(data)
with open(os.path.splitext(path)[0] + '.pyw', 'w') as file:
builder(os.path.basename(path), base64.b64encode(data), file)
def read_file(path):
"Read the entire file content from path in binary mode."
assert os.path.isfile(path)
with open(path, 'rb') as file:
return file.read()
def optimize(data):
"Compress the data and select the best method to write."
bz2_data = bz2.compress(data, 9)
zlib_data = zlib.compress(data, 9)
sizes = tuple(map(len, (data, bz2_data, zlib_data)))
smallest = sizes.index(min(sizes))
if smallest == 1:
return build_bz2_extractor, bz2_data
if smallest == 2:
return build_zlib_extractor, zlib_data
return build_b64_extractor, data
################################################################################
def build_bz2_extractor(filename, data, file):
"Write a Python program that uses bz2 data compression."
print("import base64, bz2, os", file=file)
print("data =", data, file=file)
print("with open({!r}, 'wb') as file:".format(filename), file=file)
print(" file.write(bz2.decompress(base64.b64decode(data)))", file=file)
print("os.startfile({!r})".format(filename), file=file)
def build_zlib_extractor(filename, data, file):
"Pack data into a self-extractor with zlib compression."
print("import base64, zlib, os", file=file)
print("data =", data, file=file)
print("with open({!r}, 'wb') as file:".format(filename), file=file)
print(" file.write(zlib.decompress(base64.b64decode(data)))", file=file)
print("os.startfile({!r})".format(filename), file=file)
def build_b64_extractor(filename, data, file):
"Create a Python file that may not utilize compression."
print("import base64, os", file=file)
print("data =", data, file=file)
print("with open({!r}, 'wb') as file:".format(filename), file=file)
print(" file.write(base64.b64decode(data))", file=file)
print("os.startfile({!r})".format(filename), file=file)
################################################################################
if __name__ == '__main__':
main()
# Small Program Version
# import bz2,base64 as a,os.path as b,sys,zlib;c=sys.argv[1]
# with open(c,'rb') as d:d=d.read();e,f=bz2.compress(d),zlib.compress(d,9);g=list(map(len,(d,e,f)));g,h,i,j,k,l=g.index(min(g)),'import base64 as a,os','\nwith open({0!r},"wb") as b:b.write(','.decompress(','a.b64decode({1}))',';os.startfile({0!r})'
# if g==1:d,e=e,h+',bz2'+i+'bz2'+j+k+')'+l
# elif g==2:d,e=f,h+',zlib'+i+'zlib'+j+k+')'+l
# else:e=h+i+k+l
# with open(b.splitext(c)[0]+'.pyw','w') as f:f.write(e.format(b.basename(c),a.b64encode(d)))
|
question/migrations/0002_down_vote_total_view.py | yazdanv/backend | 232 | 11121473 | # Generated by Django 2.1.5 on 2019-07-31 19:44
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('question', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='answer',
name='down_vote',
field=models.ManyToManyField(related_name='answer_down_votes', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='question',
name='down_vote',
field=models.ManyToManyField(related_name='question_down_votes', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='question',
name='total_view',
field=models.IntegerField(default=0),
),
]
|
mmhuman3d/data/data_converters/mpii.py | ykk648/mmhuman3d | 472 | 11121502 | <filename>mmhuman3d/data/data_converters/mpii.py
import os
from typing import List
import h5py
import numpy as np
from tqdm import tqdm
from mmhuman3d.core.conventions.keypoints_mapping import convert_kps
from mmhuman3d.data.data_structures.human_data import HumanData
from .base_converter import BaseConverter
from .builder import DATA_CONVERTERS
@DATA_CONVERTERS.register_module()
class MpiiConverter(BaseConverter):
"""MPII Dataset `2D Human Pose Estimation: New Benchmark and State of the
Art Analysis' CVPR'2014. More details can be found in the `paper
<http://human-pose.mpi-inf.mpg.de/contents/andriluka14cvpr.pdf>`__.
"""
@staticmethod
def center_scale_to_bbox(center: float, scale: float) -> List[float]:
"""Obtain bbox given center and scale."""
w, h = scale * 200, scale * 200
x, y = center[0] - w / 2, center[1] - h / 2
return [x, y, w, h]
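# e.g. center=(500, 400), scale=2.0 -> [300.0, 200.0, 400.0, 400.0]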
def convert(self, dataset_path: str, out_path: str) -> dict:
"""
Args:
dataset_path (str): Path to directory where raw images and
annotations are stored.
out_path (str): Path to directory to save preprocessed npz file
Returns:
dict:
A dict containing keys image_path, bbox_xywh, keypoints2d,
keypoints2d_mask stored in HumanData() format
"""
# use HumanData to store all data
human_data = HumanData()
# structs we use
image_path_, bbox_xywh_, keypoints2d_ = [], [], []
# annotation files
annot_file = os.path.join(dataset_path, 'train.h5')
# read annotations
f = h5py.File(annot_file, 'r')
centers, image_path, keypoints2d, scales = \
f['center'], f['imgname'], f['part'], f['scale']
# go over all annotated examples
for center, imgname, keypoints2d16, scale in tqdm(
zip(centers, image_path, keypoints2d, scales)):
imgname = imgname.decode('utf-8')
# check if all major body joints are annotated
if (keypoints2d16 > 0).sum() < 2 * 16:
continue
# keypoints
keypoints2d16 = np.hstack([keypoints2d16, np.ones([16, 1])])
# bbox
bbox_xywh = self.center_scale_to_bbox(center, scale)
# store data
image_path_.append(os.path.join('images', imgname))
bbox_xywh_.append(bbox_xywh)
keypoints2d_.append(keypoints2d16)
bbox_xywh_ = np.array(bbox_xywh_).reshape((-1, 4))
bbox_xywh_ = np.hstack([bbox_xywh_, np.ones([bbox_xywh_.shape[0], 1])])
keypoints2d_ = np.array(keypoints2d_).reshape((-1, 16, 3))
keypoints2d_, mask = convert_kps(keypoints2d_, 'mpii', 'human_data')
human_data['image_path'] = image_path_
human_data['bbox_xywh'] = bbox_xywh_
human_data['keypoints2d_mask'] = mask
human_data['keypoints2d'] = keypoints2d_
human_data['config'] = 'mpii'
human_data.compress_keypoints_by_mask()
# store the data struct
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'mpii_train.npz')
human_data.dump(out_file)
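# A minimal usage sketch (illustrative only; the paths are placeholders and instantiation
# normally goes through the DATA_CONVERTERS registry, so any BaseConverter constructor
# arguments are assumed to be defaults):
#   converter = MpiiConverter()
#   converter.convert(dataset_path='data/datasets/mpii', out_path='data/preprocessed_datasets')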
|
algorithm/backtracking_examples.py | ganeshskudva/Algorithm_Templates | 190 | 11121506 | <gh_stars>100-1000
from collections import Counter
import re
# [46] https://leetcode.com/problems/permutations/
# Given a collection of distinct integers, return all possible permutations.
def permute(nums):
def backtrack(first=0):
# if all integers are used up
if first == n:
output.append(nums[:])
for i in range(first, n):
# place i-th integer first
# in the current permutation
nums[first], nums[i] = nums[i], nums[first]
# use next integers to complete the permutations
backtrack(first + 1)
# backtrack
nums[first], nums[i] = nums[i], nums[first]
n = len(nums)
output = []
backtrack()
return output
# [51] https://leetcode.com/problems/n-queens/
# Given an integer n, return all distinct solutions to the n-queens puzzle.
def solveNQueens(n):
result = []
def backtracking(queens, xy_diff, xy_sums):
p = len(queens)
if p == n:
result.append(queens)
return
for q in range(n):
if q not in queens and p - q not in xy_diff and p + q not in xy_sums:
backtracking(queens + [q], xy_diff | {p - q}, xy_sums | {p + q})
backtracking([], set(), set())
return [['.' * i + 'Q' + '.' * (n - i - 1) for i in queen] for queen in result]
# [37] https://leetcode.com/problems/sudoku-solver/
# Write a program to solve a Sudoku puzzle by filling the empty cells.
#
# easy-to-understand version, not an efficient solution
# optimize: use priority queue and bit-manipulation
def solveSudoku(board):
stack = [(i, j) for i in range(9) for j in range(9) if board[i][j] == "."]
def dfs():
if not stack:
return
x, y = stack.pop()
box = [board[x // 3 * 3 + i][y // 3 * 3 + j] for i in range(3) for j in range(3)]
row = [board[x][j] for j in range(9)]
col = [board[i][y] for i in range(9)]
for i in "123456789":
if not any([i in box, i in col, i in row]):
board[x][y] = i
dfs()
if not stack:
return
board[x][y] = "."
stack.append((x, y))
dfs()
# [79] https://leetcode.com/problems/word-search/
# Given a 2D board and a word, find if the word exists in the grid.
def exist(board: 'List[List[str]]', word: str) -> bool:
m, n = len(board), len(board[0])
bcnts = Counter(c for r in board for c in r)
for w, w_cnt in Counter(word).items():
if w not in bcnts or w_cnt > bcnts[w]:
return False
def backtrack(i, j, index):
if index == len(word) - 1:
return True
# mark it as visited
board[i][j] = '*'
for dx, dy in (0, 1), (1, 0), (0, -1), (-1, 0):
next_i, next_j = i + dx, j + dy
# check before dfs
if 0 <= next_i < m and 0 <= next_j < n and word[index + 1] == board[next_i][next_j] and backtrack(
next_i, next_j, index + 1):
return True
# revert the state
board[i][j] = word[index]
return False
for i in range(m):
for j in range(n):
if board[i][j] == word[0] and backtrack(i, j, 0):
return True
return False
# [351] https://leetcode.com/problems/android-unlock-patterns/
# Given an Android 3x3 key lock screen and two integers m and n, where 1 ≤ m ≤ n ≤ 9, count the total number of
# unlock patterns of the Android lock screen, which consist of minimum of m keys and maximum n keys.
def numberOfPatterns(m: int, n: int) -> int:
through_dict = {(1, 3): 2, (4, 6): 5, (7, 9): 8, (1, 7): 4, (2, 8): 5, (3, 9): 6, (1, 9): 5, (3, 7): 5}
res = 0
def backtracking(last, used: set, left: set):
nonlocal res
if len(used) > n:
return
if m <= len(used) <= n:
res += 1
for num in left:
if last:
key = (last, num) if last < num else (num, last)
if key in through_dict:
if through_dict[key] in left:
continue
used.add(num)
left.remove(num)
backtracking(num, used, left)
left.add(num)
used.remove(num)
backtracking(None, set(), {i for i in range(1, 10)})
return res
# [90] https://leetcode.com/problems/subsets-ii/
# Given a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).
def subsetsWithDup(nums: 'List[int]') -> 'List[List[int]]':
res = []
nums.sort()
def backtracking(start, path):
# abandon rest numbers
res.append(path)
for i in range(start, len(nums)):
# duplicate element will only add the first one, and skip all nums after it.
# equivalent to internal serial number for same element
if i > start and nums[i] == nums[i - 1]:
continue
backtracking(i + 1, path + [nums[i]])
backtracking(0, [])
return res
# [10] https://leetcode.com/problems/regular-expression-matching/
# Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'.
#
# The key point to enhance performance is pre-processing pattern
# specific optimization, not very scalable, but efficient for this solution.
def isMatch(s, p):
pattern = re.compile(r'[a-z.]\*?')
patterns = re.findall(pattern, p)
# specific optimization, not scalable, but efficient for this solution
# pre-process patterns, merge same or including patterns
def preProcess(patterns):
        # .*: merge all adjacent x* patterns into the .*
p_count, p_index = 0, -1
# count every time after update patterns
while p_count < patterns.count('.*'):
index = patterns.index('.*', p_index + 1)
index_l, index_r = index - 1, index + 1
while index_l >= 0 and len(patterns[index_l]) == 2:
index_l -= 1
while index_r < len(patterns) and len(patterns[index_r]) == 2:
index_r += 1
patterns = patterns[0:index_l + 1] + patterns[index:index + 1] + patterns[index_r:]
# update p_index after merge
p_index = patterns.index('.*', p_index + 1)
p_count += 1
        # a-z*: merge adjacent patterns of the same letter (a-z and a-z*) into a single a-z*
start_index, i, flag, pattern_ch, new_patterns = 0, 0, False, '', []
for i, pat in enumerate(patterns):
if pattern_ch != pat or pattern_ch[0] == '.':
if flag:
new_patterns.append(pattern_ch)
else:
new_patterns.extend(patterns[start_index:i])
flag = len(pat) == 2
start_index = i
pattern_ch = pat
elif not flag and len(pat) == 2:
flag = True
if flag:
new_patterns.append(pattern_ch)
else:
new_patterns.extend(patterns[start_index:i + 1])
return new_patterns
# match pattern by backtracking
def isMatchPatterns(s, patterns, index):
# if patterns has been matched out, check whether reach the end of s
if len(patterns) == 0:
return index >= len(s)
# if there are remain patterns, if all the remains like x*, match success, otherwise failed.
if index >= len(s):
return all(len(p) > 1 for p in patterns)
p = patterns[0]
if len(p) == 1:
# when single pattern, if encounter same char or '.', match success, otherwise failed
if p[0] == s[index] or p[0] == '.':
return isMatchPatterns(s, patterns[1:], index + 1)
else:
return False
elif len(p) == 2:
# when pattern with *, if encounter same char or '.', match success, otherwise failed
if p[0] == s[index] or p[0] == '.':
# when match success, you can continue to use this pattern, or abandon this and match next pattern.
return isMatchPatterns(s, patterns, index + 1) or isMatchPatterns(s, patterns[1:], index)
# when it failed, match next pattern, not return false, because * can match zero char.
else:
return isMatchPatterns(s, patterns[1:], index)
return isMatchPatterns(s, preProcess(patterns), 0)
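# Quick self-check sketch (illustrative only), exercising a few of the examples above.
if __name__ == '__main__':
    assert len(permute([1, 2, 3])) == 6
    assert len(solveNQueens(4)) == 2
    assert exist([["a", "b"], ["c", "d"]], "abdc")
    assert isMatch("aab", "c*a*b")
    print("all backtracking examples passed")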
|
dump_match/dataset.py | hoverinc/OANet | 209 | 11121512 | <gh_stars>100-1000
import h5py
import os
import pickle
import numpy as np
from sequence import Sequence
class Dataset(object):
def __init__(self, dataset_path, dump_dir, dump_file, seqs, mode, desc_name, vis_th, pair_num, pair_path=None):
self.dataset_path = dataset_path
self.dump_dir = dump_dir
self.dump_file = os.path.join(dump_dir, dump_file)
self.seqs = seqs
self.mode = mode
self.desc_name = desc_name
self.vis_th = vis_th
self.pair_num = pair_num
self.pair_path = pair_path
self.dump_data()
def collect(self):
data_type = ['xs','ys','Rs','ts', 'ratios', 'mutuals',\
'cx1s', 'cy1s', 'cx2s', 'cy2s', 'f1s', 'f2s']
pair_idx = 0
with h5py.File(self.dump_file, 'w') as f:
data = {}
for tp in data_type:
data[tp] = f.create_group(tp)
for seq in self.seqs:
print(seq)
data_seq = {}
for tp in data_type:
data_seq[tp] = pickle.load(open(self.dump_dir+'/'+seq+'/'+self.desc_name+'/'+self.mode+'/'+str(tp)+'.pkl','rb'))
seq_len = len(data_seq['xs'])
for i in range(seq_len):
for tp in data_type:
data_item = data_seq[tp][i]
if tp in ['cx1s', 'cy1s', 'cx2s', 'cy2s', 'f1s', 'f2s']:
data_item = np.asarray([data_item])
data_i = data[tp].create_dataset(str(pair_idx), data_item.shape, dtype=np.float32)
data_i[:] = data_item.astype(np.float32)
pair_idx = pair_idx + 1
print('pair idx now ' +str(pair_idx))
def dump_data(self):
# make sure you have already saved the features
for seq in self.seqs:
pair_name = None if self.pair_path is None else self.pair_path+'/'+seq.rstrip("/")+'-te-'+str(self.pair_num)+'-pairs.pkl'
dataset_path = self.dataset_path+'/'+seq+'/'+self.mode
dump_dir = self.dump_dir+'/'+seq+'/'+self.desc_name+'/'+self.mode
print(dataset_path)
dataset = Sequence(dataset_path, dump_dir, self.desc_name, self.vis_th, self.pair_num, pair_name)
print('dump intermediate files.')
dataset.dump_intermediate()
print('dump matches.')
dataset.dump_datasets()
print('collect pkl.')
self.collect()
|
yamale/validators/__init__.py | basnijholt/Yamale | 457 | 11121602 | from .base import Validator
from .validators import *
|
Python/CodeCoverage/functions.py | Gjacquenot/training-material | 115 | 11121606 | <gh_stars>100-1000
def fac_r(n):
if n < 2:
return 1
else:
return n*fac_r(n - 1)
def fac_i(n):
result = 1
for i in range(2, n + 1):
result *= i
return result
|
utils/__init__.py | yujiatay/deep-motion-editing | 966 | 11121615 | import sys
import os
BASEPATH = os.path.dirname(__file__)
sys.path.insert(0, BASEPATH)
|
plenum/server/consensus/batch_id.py | jandayanan/indy-plenum | 148 | 11121617 |
# `view_no` is the current view number, while `pp_view_no` is the view number in which the given PrePrepare
# was initially created and applied.
# It's critical to keep the original view no to correctly create the audit ledger transaction
# (since the PrePrepare's view no is present there).
# An example where `view_no` != `pp_view_no` is when a view change didn't finish in the first round
# (e.g. the next primary is unavailable).
from typing import NamedTuple
BatchID = NamedTuple('BatchID', [('view_no', int), ('pp_view_no', int), ('pp_seq_no', int), ('pp_digest', str)])
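# A minimal illustrative sketch (not part of the original module; the digest value is made up):
# a PrePrepare created in view 0 but ordered only after a second view-change round finishes in
# view 1 keeps its original pp_view_no.
if __name__ == "__main__":
    bid = BatchID(view_no=1, pp_view_no=0, pp_seq_no=10, pp_digest="fake_digest")
    assert bid.view_no != bid.pp_view_no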
|
tests/test_functional.py | filipmu/fastaudio | 152 | 11121633 | import torch
from fastai.data.all import test_eq as _test_eq
from unittest.mock import patch
from fastaudio.augment.functional import region_mask
class TestCreateRegionMask:
def test_shape(self):
_test_eq(region_mask(1, 5, 7, 10).shape, (1, 10))
_test_eq(region_mask(2, 3, 7, 12).shape, (2, 12))
_test_eq(region_mask(4, 0, 3, 3).shape, (4, 3))
def test_max(self):
# Test max size
with patch(
"torch.rand",
side_effect=[
torch.Tensor([[[[1.0]]]]),
torch.Tensor([[[[0.0]]]]),
],
):
_test_eq(
region_mask(1, 4, 6, 10),
torch.BoolTensor([[[[1] * 6 + [0] * 4]]]),
)
def test_min(self):
# Test min size
with patch(
"torch.rand",
side_effect=[
torch.Tensor([0.0]),
# Test start middle start here too
torch.Tensor([0.5]),
],
):
_test_eq(
region_mask(1, 4, 6, 10),
torch.BoolTensor([0] * 3 + [1] * 4 + [0] * 3),
)
def test_multiple_masks(self):
# Test multiple masks
with patch(
"torch.rand",
side_effect=[
torch.Tensor([[1.0], [0.0]]),
torch.Tensor([[0.0], [0.5]]),
],
):
_test_eq(
region_mask(2, 4, 6, 10),
torch.BoolTensor([[1] * 6 + [0] * 4, [0] * 3 + [1] * 4 + [0] * 3]),
)
|
ztag/annotations/scannex.py | justinbastress/ztag | 107 | 11121636 | <gh_stars>100-1000
from ztag.annotation import *
class NetGearSmartSwitch(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
def process(self, obj, meta):
if obj["title"] == "ip.buffer webserver":
meta.global_metadata.manufacturer = Manufacturer.SCANNEX
meta.global_metadata.product = "ip.buffer"
meta.global_metadata.device_type = Type.SCADA_GATEWAY
meta.tags.add("embedded")
return meta
|
HunterCelery/model/ldap_config.py | tt9133github/hunter | 322 | 11121664 | #!/ usr/bin/env
# coding=utf-8
#
# Copyright 2019 ztosec & https://www.zto.com/
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
author: b5mali4
"""
import threading
from peewee import *
from common.mysql_util import MysqlManage
from model.hunter_model import HunterModel, HunterModelService
class LdapConfig(HunterModel):
"""
    LDAP configuration, used to periodically sync accounts to the local database; only one record is allowed.
"""
ldap_host = TextField(null=True)
bind_dn = TextField(null=True)
bind_dn_password = TextField(null=True)
base_dn = TextField(null=True)
search_filter = TextField(null=True)
user_name_field = TextField(null=True)
full_name_field = TextField(null=True)
email_field = TextField(null=True)
dept_name_field = TextField(null=True)
mobile_field = TextField(null=True)
ldap_switch = BooleanField(default=False)
class Meta:
database = MysqlManage.get_database()
class LdapConfigService:
"""
ldap认证配置服务
"""
__ldap_config_single = None
_instance_lock = threading.Lock()
@staticmethod
def get_fields_by_where(**kwargs):
"""
To use:
>>> ldap_config = LdapConfigService.get_fields_by_where(fields=(LdapConfig.ldap_host), where=(LdapConfig.id == 1))
>>> print(ldap_config)
:param kwargs:
:return:
"""
return HunterModelService.get_fields_by_where(LdapConfig, **kwargs)
@staticmethod
def count(**kwargs):
"""
        Number of records
To use:
>>> LdapConfigService.count(where=(LdapConfig.id == 1))
:param kwargs:
:return:
"""
return HunterModelService.count(LdapConfig, **kwargs)
@staticmethod
def update(**kwargs):
"""
        Update operation; after updating, the cached singleton needs to be reassigned
To use:
>>> LdapConfigService.update(fields=({LdapConfig.ldap_host: "777" }))
:param kwargs:
:return:
"""
result = HunterModelService.update(LdapConfig, **kwargs)
LdapConfigService.get_single_instance(True)
return result
@staticmethod
def save(**kwargs):
"""
        Save operation; should not be performed a second time
To use:
>>> LdapConfigService.save(ldap_host="ldap://127.0.0.1")
:param kwargs:
:return:
"""
return HunterModelService.save(LdapConfig, **kwargs)
@staticmethod
def get_single_instance(refresh=False):
"""
        Get the singleton instance
:param refresh:
:return:
"""
with LdapConfigService._instance_lock:
if refresh or LdapConfigService.__ldap_config_single is None:
LdapConfigService.__ldap_config_single = LdapConfigService.get_fields_by_where()[0]
return LdapConfigService.__ldap_config_single
|
test/utils_tests.py | seba-1511/randopt | 115 | 11121666 | <reponame>seba-1511/randopt
#!/usr/bin/env python3
import os
import unittest
import randopt as ro
class TestUtils(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_dict_to_list(self):
dictionary = dict(asdf=23, yxcv='vcxy', qwer=1)
ref = ['asdf23', 'qwer1', 'yxcvvcxy']
res = ro.dict_to_list(dictionary)
self.assertEqual(ref, res)
def test_dict_to_constants(self):
dictionary = dict(asdf=23, yxcv='vcxy', qwer=1)
res = ro.dict_to_constants(dictionary)
self.assertTrue(isinstance(res, dict))
for key, value in res.items():
self.assertTrue(isinstance(key, str))
self.assertTrue(isinstance(value, ro.Constant))
def test_dict_to_path(self):
dictionary = dict(asdf=23, yxcv='vcxy', qwer=1)
res = ro.dict_to_path(dictionary)
subs = res.split('/')
for sub in subs:
self.assertTrue(len(sub) < 255)
ref = ro.dict_to_list(dictionary)
self.assertEqual(subs, ref)
    def test_dict_to_string(self):
dictionary = dict(asdf=23, yxcv='vcxy', qwer=1)
res = ro.dict_to_string(dictionary)
subs = res.split('-')
ref = ro.dict_to_list(dictionary)
self.assertEqual(subs, ref)
if __name__ == '__main__':
unittest.main()
|
datasets/imagenet/scripts/imagenet.py | dgtlmoon/deepdetect | 1,672 | 11121672 | import os, argparse, glob, sys, subprocess
from collections import defaultdict
def sizeof_fmt(num):
for x in ['bytes','KB','MB','GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class Synset:
'A representation of a category, aka synset'
_name = ''
_desc = ''
_syn = ''
_loc = ''
_img_count = 0 # number of images in synset
_imgs = []
_size = 0
_parent = ''
_children = []
def __init__(self, loc):
self._loc = loc
self._syn = os.path.basename(os.path.normpath(loc))
def print_synset(self):
print '----------------------'
print self._syn
print self._name
print self._desc
print self._img_count, "images"
print sizeof_fmt(self._size)
print '----------------------'
def load_words(wordsfile):
words = {}
with open(wordsfile) as f:
words = dict(x.rstrip().split(None, 1) for x in f)
return words
def load_descs(descfile):
descs = {}
with open(descfile) as f:
descs = dict(x.rstrip().split(None,1) for x in f)
return descs
def load_treemap(treemapfile):
tdict = defaultdict(list)
with open(treemapfile) as f:
for line in f:
ls = line.rstrip().split(' ')
tdict[ls[0]].append(ls[1])
return tdict
def read_synsets(alldirs,synsets,descs,search,lsynsets):
synsetsobj = {}
for d in alldirs:
s = Synset(d)
if lsynsets:
if not s._syn in lsynsets:
continue
s._name = synsets[s._syn]
if search:
if not search in s._name:
continue
s._desc = descs[s._syn]
s._imgs = glob.glob(d + "/*")
s._img_count = len(s._imgs)
s._size = sum(os.path.getsize(f) for f in s._imgs if os.path.isfile(f))
synsetsobj[s._syn] = s
return synsetsobj
def find_treemap(lsyn,tmap):
# - iterate lsyn
    # - for each key get the subsynsets
    # - if no subsynsets, add to temporary lsyn
    # - otherwise remove key from lsyn (in fact only if it has no images, so we leave it for now)
# - merge lsyn with temporary lsyn
clsyn = lsyn
tlsyn = []
for key in lsyn:
ls = tmap[key]
if ls:
#tlsyn.remove(key)
for l in ls:
#tlsyn.append(l)
ttlsyn = []
ttlsyn.append(l)
ttlsyn = find_treemap(ttlsyn,tmap)
#print 'ttlsyn=',ttlsyn
tlsyn = tlsyn + ttlsyn
#print 'tlsyn=',tlsyn
lsyn = clsyn + tlsyn
return lsyn
def write_dict(files,ffile):
f = open(ffile,'w')
for key in files:
line = str(key) + ' ' + str(files[key]) + '\n'
f.write(line)
parser = argparse.ArgumentParser(description='Imagenet processing tools')
parser.add_argument('repository',type=str,help='location of the imagenet repository')
parser.add_argument('--list',dest='list',action='store_true',help='list repository, read-only')
parser.add_argument('--dataset',dest='dataset',type=str,help='location of a dataset to be created based on search terms (--search) or list (--synsets) of synsets')
parser.add_argument('--trainperc',dest='trainperc',type=float,help='%% of the dataset to be used as training set')
parser.add_argument('--search',dest='search',type=str,default='',help='search for synsets whose name contains the search term')
parser.add_argument('--synsets',dest='synsets',type=str,help='list of synsets, possibly in a file, to be looked up')
parser.add_argument('--subsynsets',dest='subsynsets',type=str,default='none',help='use treemaps to retrieve synsets that are part of a higher level synset')
args = parser.parse_args()
allsynsets = load_words('words.txt')
alldescs = load_descs('gloss.txt')
alldirs = glob.glob(args.repository + "/n*")
print "Found", len(alldirs), "image repositories as synsets"
lsynsets = {}
if args.synsets:
if not '.' in args.synsets: # not a file
l = args.synsets.split(',')
for e in l:
lsynsets[e] = 1
else:
with open(args.synsets) as f:
lsynsets = dict(x.rstrip().split(None,1) for x in f)
if not args.subsynsets == 'none' and not args.subsynsets == '':
lsynsets[args.subsynsets] = 1
allsynsetsobj = read_synsets(alldirs,allsynsets,alldescs,args.search,lsynsets)
print "Found", len(allsynsetsobj), "relevant synsets"
if not args.subsynsets == 'none':
treemap = load_treemap('wordnet.is_a.txt')
lsyn = []
for key,value in allsynsetsobj.items():
for l in treemap[key]:
lsyn.append(l)
lsyn = find_treemap(lsyn,treemap)
#print len(lsyn)
subsynsetsobj = read_synsets(alldirs,allsynsets,alldescs,'',lsyn)
allsynsetsobj = dict(allsynsetsobj,**subsynsetsobj)
if args.list:
totalsize = 0
for key,value in allsynsetsobj.items():
value.print_synset()
totalsize = totalsize + value._size
print "Found", len(allsynsetsobj), "relevant synsets"
print "Number of images:",sum(allsynsetsobj[o]._img_count for o in allsynsetsobj)
print "Total size: "+ sizeof_fmt(totalsize)
elif args.dataset:
try:
os.mkdir(args.dataset)
except:
pass
if not args.trainperc:
for key,value in allsynsetsobj.items():
os.symlink(value._loc,args.dataset + "/" + value._syn)
else:
print "Processing dataset", args.dataset
trainrep = 'train'
valrep = 'val'
trainpath = args.dataset + "/" + trainrep
valpath = args.dataset + "/" + valrep
trainfile = args.dataset + '/train.txt'
valfile = args.dataset + '/val.txt'
correspfile = args.dataset + '/corresp.txt'
tfiles = {}
vfiles = {}
corresp = {}
try:
os.mkdir(trainpath)
os.mkdir(valpath)
except:
pass
cl = 0
gifconverts = 0
for key,value in allsynsetsobj.items():
thresh = int(len(value._imgs)*args.trainperc/100.0)
train_list = value._imgs[0:thresh]
val_list = value._imgs[thresh:int(len(value._imgs))]
lpath = trainpath + "/" + value._syn
if not cl in corresp:
corresp[cl] = key + ' ' + value._name
try:
os.mkdir(lpath)
except:
pass
for f in train_list:
fname = os.path.basename(os.path.normpath(f))
if ".gif" in fname:
fname = fname + ".jpg"
convcmd = f + ' ' + trainpath + '/' + value._syn + '/' + fname
os.system("/usr/bin/convert " + convcmd)
gifconverts += 1
else:
os.symlink(f,trainpath + "/" + value._syn + "/" + fname)
tfiles[value._syn + '/' + os.path.basename(fname)] = cl
for f in val_list:
fname = os.path.basename(os.path.normpath(f))
if ".gif" in fname:
fname = fname + ".jpg"
convcmd = f + ' ' + valpath + '/' + os.path.basename(fname)
os.system("/usr/bin/convert " + convcmd)
gifconverts += 1
else:
os.symlink(f,valpath + "/" + os.path.basename(fname))
vfiles[os.path.basename(fname)] = cl
cl += 1
write_dict(corresp,correspfile)
write_dict(tfiles,trainfile)
write_dict(vfiles,valfile)
print "converted " + str(gifconverts) + " gif files"
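# Example invocations (illustrative only; paths are placeholders, and the script expects
# words.txt / gloss.txt / wordnet.is_a.txt in the working directory):
#   python imagenet.py /data/imagenet --list --search terrier
#   python imagenet.py /data/imagenet --dataset /data/terriers --search terrier --trainperc 80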
|
release/scripts/presets/camera/Sony_F65.py | rbabari/blender | 365 | 11121709 | <gh_stars>100-1000
import bpy
bpy.context.camera.sensor_width = 24.33
bpy.context.camera.sensor_height = 12.83
bpy.context.camera.sensor_fit = 'HORIZONTAL'
|
earthpy/tests/test_epsg.py | nkorinek/earthpy | 350 | 11121721 | import pytest
import rasterio as rio
import os.path as op
import earthpy as et
import earthpy.spatial as es
from earthpy.io import path_to_example
@pytest.fixture
def output_dir(out_path):
return op.dirname(out_path)
def test_epsg():
"""Unit test for loading EPSG to Proj4 string dictionary."""
assert et.epsg["4326"] == "+proj=longlat +datum=WGS84 +no_defs"
def test_crs_check_tif():
"""Test crs check works properly."""
crs = es.crs_check(path_to_example("rmnp-rgb.tif"))
assert(crs.to_epsg() == 4326)
def test_crs_check_bad_file():
with pytest.raises(rio.errors.RasterioIOError, match="Oops, your data ar"):
es.crs_check(path_to_example("rmnp.shp"))
def test_no_crs_in_file(output_dir):
output_path = op.join(output_dir, "no_crs.tif")
with rio.open(et.io.path_to_example("green.tif")) as src:
data = src.read(1)
profile = src.profile
profile.update(crs=None)
with rio.open(output_path, 'w', **profile) as dst:
dst.write(data, 1)
with pytest.raises(ValueError, match="No CRS found in data. The raster "):
es.crs_check(output_path)
|
survae/tests/nn/nets/autoregressive/__init__.py | alisiahkoohi/survae_flows | 262 | 11121781 | <filename>survae/tests/nn/nets/autoregressive/__init__.py
from .made import *
from .pixelcnn import *
from .transformer import *
from .sparse_transformer import *
|
test_python_toolbox/test_path_tools/test_get_root_path_of_module.py | hboshnak/python_toolbox | 119 | 11121788 | <gh_stars>100-1000
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
from python_toolbox.path_tools import get_root_path_of_module
def test():
''' '''
import email.charset
assert get_root_path_of_module(email) == \
get_root_path_of_module(email.charset)
import python_toolbox.path_tools
assert get_root_path_of_module(python_toolbox) == \
get_root_path_of_module(python_toolbox.path_tools)
|
tests/test_terminal.py | edouard-lopez/colorful | 517 | 11121835 | # -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import os
import sys
import pytest
# do not overwrite module
os.environ['COLORFUL_NO_MODULE_OVERWRITE'] = '1'
import colorful.terminal as terminal # noqa
@pytest.mark.skipif(not sys.stdout.isatty(), reason='fails without a tty')
@pytest.mark.parametrize('env,expected', [
# test force color settings
({'COLORFUL_DISABLE': '1'}, terminal.NO_COLORS),
({'COLORFUL_FORCE_8_COLORS': '1'}, terminal.ANSI_8_COLORS),
({'COLORFUL_FORCE_16_COLORS': '1'}, terminal.ANSI_16_COLORS),
({'COLORFUL_FORCE_256_COLORS': '1'}, terminal.ANSI_256_COLORS),
({'COLORFUL_FORCE_TRUE_COLORS': '1'}, terminal.TRUE_COLORS),
# test recommended $COLORTERM variable
({'COLORTERM': 'truecolor'}, terminal.TRUE_COLORS),
({'COLORTERM': '24bit'}, terminal.TRUE_COLORS),
({'COLORTERM': '8bit'}, terminal.ANSI_256_COLORS),
({'COLORTERM': 'XYZ'}, terminal.ANSI_16_COLORS),
# test $TERM_PROGRAM variable
({'TERM_PROGRAM': 'iTerm.app'}, terminal.TRUE_COLORS),
({'TERM_PROGRAM': 'Hyper'}, terminal.TRUE_COLORS),
({'TERM_PROGRAM': 'Apple_Terminal'}, terminal.ANSI_256_COLORS),
# test $TERM variable values for 256 ANSI colors
({'TERM': 'screen-256'}, terminal.ANSI_256_COLORS),
({'TERM': 'screen-256color'}, terminal.ANSI_256_COLORS),
({'TERM': 'xterm-256'}, terminal.ANSI_256_COLORS),
({'TERM': 'xterm-256color'}, terminal.ANSI_256_COLORS),
# test $TERM variable values for 16 colors
({'TERM': 'screen'}, terminal.ANSI_16_COLORS),
({'TERM': 'xterm'}, terminal.ANSI_16_COLORS),
({'TERM': 'vt100'}, terminal.ANSI_16_COLORS),
({'TERM': 'color'}, terminal.ANSI_16_COLORS),
({'TERM': 'ansi'}, terminal.ANSI_16_COLORS),
({'TERM': 'cygwin'}, terminal.ANSI_16_COLORS),
({'TERM': 'linux'}, terminal.ANSI_16_COLORS),
# test fallback to 8 colors
({}, terminal.ANSI_8_COLORS),
# force disable overrules force colors
({
'COLORFUL_DISABLE': '1',
'COLORFUL_FORCE_8_COLORS': '1', 'COLORFUL_FORCE_16_COLORS': '1',
'COLORFUL_FORCE_256_COLORS': '1', 'COLORFUL_FORCE_TRUE_COLORS': '1'
}, terminal.NO_COLORS),
# force colors overrules $COLORTERM
({
'COLORFUL_FORCE_TRUE_COLORS': '1',
'COLORTERM': '24bit'
}, terminal.TRUE_COLORS),
# $COLORTERM overrules $TERM_PROGRAM
({
'COLORTERM': 'truecolor',
'TERM_PROGRAM': 'iTerm.app'
}, terminal.TRUE_COLORS),
# $TERM_PROGRAM overrules $TERM with 256 colors
({
'TERM_PROGRAM': 'iTerm.app',
'TERM': 'xterm-256color'
}, terminal.TRUE_COLORS)
])
def test_color_support_detection(env, expected):
"""
Test the terminal color support auto detection
"""
assert terminal.detect_color_support(env) == expected
|
usaspending_api/accounts/models/budget_authority.py | g4brielvs/usaspending-api | 217 | 11121855 | from django.db import models
class BudgetAuthority(models.Model):
agency_identifier = models.TextField(db_index=True) # aka CGAC
fr_entity_code = models.TextField(null=True, db_index=True) # aka FREC
year = models.IntegerField(null=False)
amount = models.BigIntegerField(null=True)
class Meta:
db_table = "budget_authority"
unique_together = (("agency_identifier", "fr_entity_code", "year"),)
|
RecoLocalTracker/SiStripRecHitConverter/python/SiStripRecHitConverter_cfi.py | ckamtsikis/cmssw | 852 | 11121875 | <reponame>ckamtsikis/cmssw<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from RecoLocalTracker.SiStripRecHitConverter.siStripRecHitConverter_cfi import siStripRecHitConverter as _siStripRecHitConverter
siStripMatchedRecHits = _siStripRecHitConverter.clone()
|
tools/getsize.py | bontchev/wlscrape | 110 | 11121906 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import locale
import json
import sys
__author__ = "<NAME> <<EMAIL>>"
__license__ = "GPL"
__VERSION__ = "1.00"
def error(e):
print("Error: %s." % e, file=sys.stderr)
sys.exit(-1)
def humanBytes(B):
'Return the given bytes as a human friendly KB, MB, GB, or TB string'
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
        return '{0} {1}'.format(B, 'Byte' if B == 1 else 'Bytes')
elif KB <= B < MB:
return '{0:.2f} Kb'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} Mb'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} Gb'.format(B/GB)
elif TB <= B:
return '{0:.2f} Tb'.format(B/TB)
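# Illustrative examples: humanBytes(512) -> '512.0 Bytes', humanBytes(1536) -> '1.50 Kb'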
def getTrueSize(number, unit):
if (unit == "B"):
return number
elif (unit == "KiB"):
return number * 1024
elif (unit == "MiB"):
return number * 1024 ** 2
elif (unit == "GiB"):
return number * 1024 ** 3
else:
error("Unknown unit: " + unit)
if __name__ == "__main__":
parser = argparse.ArgumentParser(version="%(prog)s version " + __VERSION__,
description="Computes the total files size of a wlscrape.py output.")
parser.add_argument("file", nargs="+", help="JSON data file")
args = parser.parse_args()
numFiles = 0
totalSize = 0.0
for argument in args.file:
try:
with open(argument, "r") as contentFile:
content = contentFile.read()
jsonData = json.loads(content)
for element in jsonData:
numFiles += 1
parts = element["size"].split(None)
totalSize += getTrueSize(float(parts[0]), parts[1])
except Exception as e:
error(e)
locale.setlocale(locale.LC_ALL, "")
print("Number of files found: %s." % locale.format("%d", numFiles, grouping=True), file=sys.stderr)
print("Total size: {0}.".format(humanBytes(totalSize)), file=sys.stderr)
sys.exit(0)
|
tests/basics/is_isnot.py | rxchen/micropython | 13,648 | 11121910 | print([1, 2] is [1, 2])
a = [1, 2]
b = a
print(b is a)
|
keras_cv_attention_models/convnext/convnext.py | dcleres/keras_cv_attention_models | 140 | 11121939 | <gh_stars>100-1000
from tensorflow import keras
from keras_cv_attention_models.attention_layers import (
activation_by_name,
ChannelAffine,
conv2d_no_bias,
depthwise_conv2d_no_bias,
drop_block,
layer_norm,
HeadInitializer,
add_pre_post_process,
)
from keras_cv_attention_models.download_and_load import reload_model_weights
LAYER_NORM_EPSILON = 1e-6
PRETRAINED_DICT = {
"convnext_tiny": {"imagenet": "1deac703865e190528899d5c489afa37"},
"convnext_small": {"imagenet": "7e75873348d445eb2aab4200a5d49f80"},
"convnext_base": {
"imagenet": {224: "dddac5dcd13bffc1e05688f529726f8c", 384: "ae8dc9bbca6472dc12de30db95ea1018"},
"imagenet21k-ft1k": {224: "40f78cec6cd327392a9d24f968f9e76b", 384: "4829ff932a930117525920317083d317"},
},
"convnext_large": {
"imagenet": {224: "32d401c254b623d36c22f232884000ba", 384: "01b4e72ca589c2f0ac15551e06d29818"},
"imagenet21k-ft1k": {224: "dc211e955875f8ab6de7518253e41a46", 384: "68ef87754d6ca634e32d2326c34ddd0b"},
},
"convnext_xlarge": {"imagenet21k-ft1k": {224: "7c7ab46f41ac34655f3e035b873a2163", 384: "636db850c0a73ba10e8ab32e91c38df6"}},
}
def block(inputs, output_channel, layer_scale_init_value=1e-6, drop_rate=0, activation="gelu", name=""):
nn = depthwise_conv2d_no_bias(inputs, kernel_size=7, padding="SAME", use_bias=True, name=name)
nn = layer_norm(nn, epsilon=LAYER_NORM_EPSILON, name=name)
nn = keras.layers.Dense(4 * output_channel, name=name + "up_dense")(nn)
nn = activation_by_name(nn, activation, name=name)
nn = keras.layers.Dense(output_channel, name=name + "down_dense")(nn)
if layer_scale_init_value > 0:
nn = ChannelAffine(use_bias=False, weight_init_value=layer_scale_init_value, name=name + "gamma")(nn)
nn = drop_block(nn, drop_rate=drop_rate, name=name)
return keras.layers.Add(name=name + "output")([inputs, nn])
def ConvNeXt(
num_blocks=[3, 3, 9, 3],
out_channels=[96, 192, 384, 768],
stem_width=-1,
layer_scale_init_value=1e-6,
head_init_scale=1.0,
input_shape=(224, 224, 3),
num_classes=1000,
activation="gelu",
drop_connect_rate=0.1,
classifier_activation="softmax",
dropout=0,
pretrained=None,
model_name="convnext",
kwargs=None,
):
inputs = keras.layers.Input(input_shape)
""" Stem """
stem_width = stem_width if stem_width > 0 else out_channels[0]
nn = conv2d_no_bias(inputs, stem_width, kernel_size=4, strides=4, padding="VALID", use_bias=True, name="stem_")
nn = layer_norm(nn, epsilon=LAYER_NORM_EPSILON, name="stem_")
""" Blocks """
total_blocks = sum(num_blocks)
global_block_id = 0
for stack_id, (num_block, out_channel) in enumerate(zip(num_blocks, out_channels)):
stack_name = "stack{}_".format(stack_id + 1)
if stack_id > 0:
nn = layer_norm(nn, epsilon=LAYER_NORM_EPSILON, name=stack_name + "downsample_")
nn = conv2d_no_bias(nn, out_channel, kernel_size=2, strides=2, use_bias=True, name=stack_name + "downsample_")
for block_id in range(num_block):
block_name = stack_name + "block{}_".format(block_id + 1)
block_drop_rate = drop_connect_rate * global_block_id / total_blocks
nn = block(nn, out_channel, layer_scale_init_value, block_drop_rate, activation, name=block_name)
global_block_id += 1
""" Output head """
if num_classes > 0:
nn = keras.layers.GlobalAveragePooling2D(name="avg_pool")(nn)
if dropout > 0:
nn = keras.layers.Dropout(dropout, name="head_drop")(nn)
nn = layer_norm(nn, epsilon=LAYER_NORM_EPSILON, name="head_")
head_init = HeadInitializer(scale=head_init_scale)
nn = keras.layers.Dense(
num_classes, dtype="float32", activation=classifier_activation, kernel_initializer=head_init, bias_initializer=head_init, name="predictions"
)(nn)
model = keras.models.Model(inputs, nn, name=model_name)
add_pre_post_process(model, rescale_mode="torch")
reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release="convnext", pretrained=pretrained)
return model
def ConvNeXtTiny(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet", **kwargs):
num_blocks = [3, 3, 9, 3]
out_channels = [96, 192, 384, 768]
return ConvNeXt(**locals(), model_name="convnext_tiny", **kwargs)
def ConvNeXtSmall(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet", **kwargs):
num_blocks = [3, 3, 27, 3]
out_channels = [96, 192, 384, 768]
return ConvNeXt(**locals(), model_name="convnext_small", **kwargs)
def ConvNeXtBase(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet", **kwargs):
num_blocks = [3, 3, 27, 3]
out_channels = [128, 256, 512, 1024]
return ConvNeXt(**locals(), model_name="convnext_base", **kwargs)
def ConvNeXtLarge(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet", **kwargs):
num_blocks = [3, 3, 27, 3]
out_channels = [192, 384, 768, 1536]
return ConvNeXt(**locals(), model_name="convnext_large", **kwargs)
def ConvNeXtXlarge(input_shape=(224, 224, 3), num_classes=1000, classifier_activation="softmax", pretrained="imagenet21k-ft1k", **kwargs):
num_blocks = [3, 3, 27, 3]
out_channels = [256, 512, 1024, 2048]
return ConvNeXt(**locals(), model_name="convnext_xlarge", **kwargs)
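# A minimal usage sketch (illustrative only): build an ImageNet-sized ConvNeXt-Tiny.
# Passing pretrained=None is assumed to skip the pretrained-weight download in
# reload_model_weights; the default "imagenet" would attempt a download instead.
if __name__ == "__main__":
    model = ConvNeXtTiny(input_shape=(224, 224, 3), num_classes=1000, pretrained=None)
    print(model.name, model.count_params())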
|
test/fixture/python_scanner/imports_unknown_files.py | jcassagnol-public/scons | 1,403 | 11121962 | import doesntexist # noqa: F401
import notthere.something # noqa: F401
from notthere import a, few, things # noqa: F401 |
generate/build_tools/forge/__init__.py | flamencist/browser-extensions | 102 | 11121975 | VERSION = '3.3.62'
def get_version():
return VERSION
class ForgeError(Exception):
pass
settings = {
'LAST_STABLE': 'v1.4'
}
|
urduhack/normalization/tests/test_character.py | cinfotech94/urduhackk | 252 | 11122003 | <filename>urduhack/normalization/tests/test_character.py
# coding: utf8
"""Test cases for character class"""
from urduhack import normalize
from urduhack.normalization.character import normalize_characters, _CORRECT_URDU_CHARACTERS_MAPPING, \
normalize_combine_characters, \
COMBINE_URDU_CHARACTERS, replace_digits
from urduhack.normalization.character import punctuations_space, remove_diacritics
from urduhack.urdu_characters import URDU_ALL_CHARACTERS, URDU_ALPHABETS, URDU_DIGITS, URDU_DIACRITICS
def test_normalize():
""" Testing main function"""
text = "پاکستان ﻤﯿﮟ وسائل کی کوئی کمی نہیں ﮨﮯ۔"
expected = normalize(text)
assert isinstance(expected, str)
for char in expected:
if char == " ":
continue
assert char in URDU_ALL_CHARACTERS
def test_normalize_characters():
"""Normalize characters Test case arabic words : Urdu words"""
words: dict = {"ﻣﯿﺎﮞ": "میاں",
"ﺗﮭﺎ": "تھا",
"ﻧﮩﯽ": "نہی",
"ﺩﺭﺑﺎﻥ": "دربان",
"ﺷﺮﯾﮏ": "شریک",
"ﻭﺯﯾﺮ": "وزیر",
"ﮐﻮﻧﮯ": "کونے",
"ﺭﺍﺿﯽ": "راضی",
"ﻣﺠﮭ": "مجھ",
"ﭼﮭﭙﺮ": "چھپر",
"ﻧﻮﺟﻮﺍﻥ": "نوجوان",
"ﻣﻨﺰﻝ": "منزل",
"ﻟﮕﺎﺗﮯ": "لگاتے",
"ﺟﻮﻧﻌﻤﺖ": "جونعمت",
"ﻣﺴﻨﺪﻭﮞ": "مسندوں",
"ﭘﺎﮎ": "پاک",
"ﻋﺎﻓﯿﺖ": "عافیت",
"ﺑﺬﺍﺕ": "بذات",
"ﻧﮑﻠﻮ": "نکلو",
"ﭘﯿﺪﺍ": "پیدا",
"ﺗﻮﮌﺍ": "توڑا",
"ﮔﯿﺎ": "گیا",
"ﺧﯿﺮ": "خیر",
"ﺑﻌﺪ": "بعد",
"ﭼﺮﺑﯽ": "چربی",
"ﺧﺎﻣﻮﺷﯽ": "خاموشی",
"ﮨﭩﮯ": "ہٹے",
"ﺍﻭﻻﺩ": "اولاد",
"ﺩﯾﻨﯽ": "دینی",
"ﭼﺎﮨﮯ": "چاہے",
"ﮐﮩﺎ": "کہا",
"ﺧﺎﻟﯽ": "خالی",
"ﻣﺎﻧﮕﯿﮟ": "مانگیں",
"ﺭﮨﺘﮯ": "رہتے",
"ﻣﻔﻠﺴﯽ": "مفلسی",
"ﺩﺭﺑﺎﺭﯼ": "درباری",
"ﺑﺘﺎﺋﯿﮟ": "بتائیں",
"ﮨﻤﺖ": "ہمت",
"ﻣﺮﺩ": "مرد",
"ﺩﻭﺳﺖ": "دوست",
"ﻋﺎﺷﻘﻮ": "عاشقو",
"ﺟﻠﻮﮦ": "جلوہ",
"ﺭﮨﺘﺎ": "رہتا",
"ﮈﺍﮐﭩﺮ": "ڈاکٹر",
"ﺭﻫﺘﯽ": "رھتی",
"ﺍﯾﺴﮯ": "ایسے",
"ﺻﺎﻑ": "صاف",
"ﺗﻌﻠﯿﻢ": "تعلیم",
"ﺁﭘﮑﺎ": "آپکا",
"ﻣﺮﺩﺍﻥ": "مردان",
"ﺣﺮﺍﻣﯽ": "حرامی",
"ﻧﮑ": "نک",
"ﺯﯾﺎﺩﮦ": "زیادہ",
"ﻧﻮﺟﻮﻥ": "نوجون",
"ﺧﺎﻧﮯ": "خانے",
"ﺭﺍﮦ ﺳﮯ": "راہ سے",
"ﻣﺤﺘﺮﻣﮧ": "محترمہ",
"ﺟﺎﻧﻮﺭ": "جانور",
"ﻧﮯﺍﯾﮏ": "نےایک",
"ﻣﺤﺒﻮﺏ": "محبوب",
"ﺧﻮﺵ": "خوش",
"ﺳﺎﺋﻞ": "سائل",
"ﮐﺮ": "کر",
"ﮐﮩﺎﮐﮧ": "کہاکہ",
"ﻧﺴﻮﺍﻧﯽ": "نسوانی",
"ﮨﻤﯿﮟ ﺑﻬﯽ": "ہمیں بھی",
"ﺍﺭﺍﺩﮦ ﺑﺘﺎﯾﺎ": "ارادہ بتایا",
"ﺑﺎﭖ": "باپ",
"ﻟﮕﯿﮟ": "لگیں",
"ﺷﺨﺺ": "شخص",
"ﺭﮨﺘﺎﮨﮯ": "رہتاہے",
"ﻗﺪﺭﺕ": "قدرت",
"ﻣﺮﺿﯽ": "مرضی",
"ﮔﯿﺎﺍﻭﺭ": "گیااور",
"ﮐﭽﮫ": "کچھ",
"ﻟﮑﮫ": "لکھ",
"ﺍﻋﻈﻢ": "اعظم",
"ﺷﺨﺼﯿﺖ": "شخصیت",
"ﺧﻼﻑ": "خلاف",
"ﻏﯿﺮ": "غیر",
"ﺳﻮﺩ": "سود",
"ﺑﮩﺘﺮ": "بہتر",
"ﻫﻮﺋﮯ": "ھوئے",
"ﺳﻼﻣﺖ": "سلامت",
"ﺭﺍﺑﻄﮧ": "رابطہ",
"ﮨﻮﮔﯽ": "ہوگی",
"ﻣﺮﺽ": "مرض",
"ﺳﻔﺮ": "سفر",
"ﻣﻔﺴﺮ": "مفسر",
"ﻧﺼﻒ": "نصف",
"ﮨﻮﮞ ﺟﺲ": "ہوں جس",
"ﭘﯿﭙﺮﺯ": "پیپرز",
"ﺑﻦ": "بن",
"ﮔﻨﮩﮕﺎﺭ": "گنہگار",
"ﺭﮨﯽ": "رہی",
"ﻣ": "م",
"ﺧﺎﻭﻧﺪ": "خاوند",
"ﺩﮐﮭﺎﺗﺎ": "دکھاتا",
"ﺟﺎﺳﮑﺘﮯ": "جاسکتے",
"ﺣﻞ": "حل",
"ﺗﺠﺮﺑﮧ": "تجربہ",
"ﮨﺎﺭﻧﮯ": "ہارنے",
"ﺳﺠﺎ": "سجا",
"ﺭﻭﻧﻖ": "رونق",
"ﺑﻨﻮﮞ": "بنوں",
"ﺳﮑﺘﯽ": "سکتی",
"ﮐﮧ ﺭﺍﺳﺘﮯ": "کہ راستے",
"ﻭﺍﻟﯽ": "والی",
"ﺣﻔﺎﻇﺖ": "حفاظت",
"ﺳﯿﺪﮬﺎ": "سیدھا",
"ﺍﻭﻧﭩﻨﯽ": "اونٹنی",
"ﺟﺎﻧﮯ": "جانے",
"ﺑﻼﯾﺎ": "بلایا",
"ﻓﺎﺋﺪﮦ": "فائدہ",
"ﮔﺎﺋﮯ": "گائے",
"ﻻﮨﻮﺭ": "لاہور",
"ﺑﭩﮭﺎﺅﮞ": "بٹھاؤں",
"اشیاﺀ": "اشیاء",
"کیلﺌے": "کیلئے",
"باعﺚ": "باعث",
"كيا خطا": "کیا خطا",
"حم مر كر": "حم مر کر",
"تم كيا كر": "تم کیا کر",
"كن فا يا كن": "کن فا یا کن",
"مر كر ﻓﺎﺋﺪﮦ": "مر کر فائدہ",
"تم كيا كرو": "تم کیا کرو",
"تم کیا کر": "تم کیا کر",
"گنہگار مر": "گنہگار مر",
"کر موت": "کر موت",
"کیا خطا": "کیا خطا",
"قريب": "قریب",
}
for key, val in words.items():
norm = normalize_characters(key)
assert val == norm
for char in norm:
if char == " ":
continue
assert len(char) == 1
assert char in URDU_ALL_CHARACTERS, norm
def test_correct_urdu_characters():
""" Test case """
for char in URDU_ALPHABETS:
assert char in _CORRECT_URDU_CHARACTERS_MAPPING
for char in URDU_DIGITS:
assert char in _CORRECT_URDU_CHARACTERS_MAPPING
for _list in _CORRECT_URDU_CHARACTERS_MAPPING.values():
for char in _list:
assert char not in URDU_ALL_CHARACTERS
for key in _CORRECT_URDU_CHARACTERS_MAPPING:
for char in key:
assert char in URDU_ALL_CHARACTERS
def test_normalize_combine_characters():
"""Test case"""
words: dict = {
"آزاد": "آزاد",
"آپ": "آپ",
"آدھے": "آدھے",
"آج": "آج",
"آرام": "آرام",
"جرأت": "جرأت",
"کوجرأت": "کوجرأت",
"أعظم": "أعظم",
}
for key, val in words.items():
norm = normalize_combine_characters(key)
assert val == norm
for char in norm:
assert char in URDU_ALL_CHARACTERS, norm
def test_combine_urdu_characters():
""" Test case """
for chars in COMBINE_URDU_CHARACTERS:
assert len(chars) == 2
for char in chars:
assert char in URDU_ALL_CHARACTERS
for char in COMBINE_URDU_CHARACTERS.values():
assert len(char) == 1
assert char in URDU_ALL_CHARACTERS
assert char in _CORRECT_URDU_CHARACTERS_MAPPING
for key, value in COMBINE_URDU_CHARACTERS.items():
assert len(key) == 2
assert len(value) == 1
def test_punctuations_space():
"""Test cases"""
data = {"ہوتا۔ انہوں": "ہوتا۔ انہوں",
"ہوتا،انہوں": "ہوتا، انہوں",
"۔۔۔۔۔۔۔۔۔": "۔۔۔۔۔۔۔۔۔",
"۔۔۔۔،،۔۔۔۔۔": "۔۔۔۔،،۔۔۔۔۔",
"ہوتا ہے ۔ ٹائپ": "ہوتا ہے۔ ٹائپ",
"ہوتا ہے ۔ٹائپ": "ہوتا ہے۔ ٹائپ",
"ہوتا ہے؟ٹائپ": "ہوتا ہے؟ ٹائپ",
"ہوتا ہے،ٹائپ": "ہوتا ہے، ٹائپ",
"ہوتا ہے ؟ٹائپ": "ہوتا ہے؟ ٹائپ",
"ہوتا ہے ؟ ٹائپ": "ہوتا ہے؟ ٹائپ",
"ہوتا ہے۔ٹائپ": "ہوتا ہے۔ ٹائپ",
"ہوتا ہے ۔ ٹائپ": "ہوتا ہے۔ ٹائپ",
"ہوتا ہے ، ٹائپ": "ہوتا ہے، ٹائپ",
"ہوتا ہے،\n": "ہوتا ہے،\n",
}
for key, value in data.items():
assert value == punctuations_space(key)
def test_remove_diacritics():
"""remove_diacritics Test case"""
words: dict = {"اب": "اَب",
"شیر پنجاب": "شیرِ پنجاب",
"اوگول": "اُوگول",
"ای": "اِی",
"اباوگل": "اَباُوگل",
"شرپن": "شرِپن",
"ااایول": "اَاُاِیول",
"اے": "اَے",
"اوشیر": "اُوشیر",
"او": "اَو",
}
for key, val in words.items():
norm = remove_diacritics(val)
assert key == norm
for char in norm:
assert char not in URDU_DIACRITICS, norm
if char != ' ':
assert char in URDU_ALPHABETS, norm
def test_replace_digits():
"""Test Case"""
eng_text = 'سکیورٹی حکام کے مطابق جنوبی صوبے 550 میں رات گئے'
ur_text = 'سکیورٹی حکام کے مطابق جنوبی صوبے ۵۵۰ میں رات گئے'
assert replace_digits(ur_text) == eng_text
assert replace_digits(eng_text, with_english=False) == ur_text
|
pycharm2020.1.3/script/core/tool/incremental_reload.py | LaudateCorpus1/realtime-server | 465 | 11122032 | <gh_stars>100-1000
import collections
import sys
import os
from core.mobilelog.LogManager import LogManager
class ReloadRecord(object):
"""
    Performs an incremental func-code reload based on the file modification times
    recorded at the last startup / reload.
    Only loose .py / .pyc files are supported; zip archives are not supported yet.
"""
def __init__(self):
super(ReloadRecord, self).__init__()
self._count = 0
self._record = collections.defaultdict(float)
self.init_record()
def init_record(self):
for name, mtime in self.iter_modules():
self._record[name] = mtime
@staticmethod
def iter_modules():
for name, module in sys.modules.items():
module_file = getattr(module, '__file__', None)
if not module_file or not isinstance(module_file, (str, )) or not os.path.isfile(module_file):
# module file not found
continue
if not module_file[-3:].lower() == '.py' and not module_file[-4:].lower() == '.pyc':
# not py or pyc
continue
if module_file.lower().endswith('.pyc') and os.path.isfile(module_file[:-1]):
module_file = module_file[:-1]
mtime = os.path.getmtime(module_file)
yield name, mtime
def _generate_diff(self):
diff_list = []
for name, mtime in self.iter_modules():
if self._record[name] < mtime:
# have modify
self._record[name] = mtime
diff_list.append(name)
return diff_list
def generate_diff(self):
self._count += 1
return self._generate_diff()
_reload_record = ReloadRecord()
def init_reload_record():
_reload_record.init_record()
def set_base_to_now():
_reload_record.generate_diff()
def reload_script():
"""
    Perform an incremental func-code reload.
:return:
"""
diff_list = _reload_record.generate_diff()
if not diff_list:
LogManager.get_logger().info('nothing to reload')
return False
from core.tool import reload_impl
for mod_name in diff_list:
reload_impl.reload_module(mod_name)
return True
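# A minimal usage sketch (illustrative only): record the baseline once at startup,
# then call reload_script() later to hot-reload only the modules whose files changed.
if __name__ == "__main__":
    init_reload_record()
    # ... modify some imported .py files on disk here ...
    reload_script()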
|
pclib/test/TestSynchronizer_test.py | belang/pymtl | 206 | 11122040 | <filename>pclib/test/TestSynchronizer_test.py
#=========================================================================
# TestSynchronizer_test.py
#=========================================================================
from __future__ import print_function
import pytest
from pymtl import *
from pclib.test import TestSource, TestSink
from TestSynchronizer import TestSynchronizer, TestSynchInfo
#-------------------------------------------------------------------------
# TestHarness
#-------------------------------------------------------------------------
class TestHarness( Model ):
def __init__( s, dtype, msgs1, msgs2, synch_info, src_delay, sink_delay ):
s.src1 = TestSource( dtype, msgs1, src_delay )
s.src2 = TestSource( dtype, msgs2, src_delay )
s.synch1 = TestSynchronizer( dtype, 0, synch_info )
s.synch2 = TestSynchronizer( dtype, 1, synch_info )
s.sink1 = TestSink( dtype, msgs1, sink_delay )
s.sink2 = TestSink( dtype, msgs2, sink_delay )
s.synch_info = synch_info
s.synch_idx = 0
s.expected_num_msgs = [ i for i, _ in synch_info.synch_table[0] ][ : -1 ]
s.connect( s.src1.out, s.synch1.in_ )
s.connect( s.synch1.out, s.sink1.in_ )
s.connect( s.src2.out, s.synch2.in_ )
s.connect( s.synch2.out, s.sink2.in_ )
def check( s ):
""" Ensure the synchronization is respected by checking how many
messages the sinks received. """
assert s.sink1.sink.idx <= s.expected_num_msgs[0]
assert s.sink2.sink.idx <= s.expected_num_msgs[1]
if s.sink1.sink.idx == s.expected_num_msgs[0] and \
s.sink2.sink.idx == s.expected_num_msgs[1]:
# Once we receive enough messages, mimic the fake synchronizer (idx
# 2) to have sent its token.
s.synch_info.token_sent( 2 )
s.synch_idx += 1
if s.synch_idx < len( s.synch_info.synch_table ):
s.expected_num_msgs[0] += s.synch_info.synch_table[ s.synch_idx ][0][0]
s.expected_num_msgs[1] += s.synch_info.synch_table[ s.synch_idx ][1][0]
else:
# The end of the synch table, so set a large number of expected
# messages.
s.expected_num_msgs[0] = 10000
s.expected_num_msgs[1] = 10000
def done( s ):
return s.src1.done and s.src2.done and s.sink1.done and s.sink2.done
def line_trace( s ):
return s.src1.line_trace() + " > " + s.sink1.line_trace() + " | " + \
s.src2.line_trace() + " > " + s.sink2.line_trace() + " | " + \
s.synch_info.line_trace()
#-------------------------------------------------------------------------
# do_test
#-------------------------------------------------------------------------
def do_test( dump_vcd, src_delay, sink_delay ):
# Test messages
test_msgs1 = [
0x0000,
0x0a0a,
0x0b0b,
# synch 0
0x0c0c,
0x0d0d,
# synch 1
# synch 2
0xf0f0,
0xe0e0,
0xd0d0,
0x1441,
0x2255,
0x1d01,
0xf0f1,
# synch 3
0xe011,
0xd022,
]
test_msgs2 = [
0x1234,
0x1122,
0xaabb,
0x00aa,
0x1a1a,
0x21aa,
# synch 0
# synch 1
0x0001,
0x1111,
0x4444,
0x1050,
# synch 2
0x1100,
0x0099,
# synch 3
0x1094,
0x1859,
0x1859,
0x1953,
0x1551,
0x3355,
]
# Note that we're using a fake synchronizer at index 2 for testing with
# a single token. The test harness will use this to ensure number of
# messages that went through is what we expect.
synch_table = [ [ [3,0], [6,0], [1,0], ],
[ [2,0], [0,0], [1,0], ],
[ [0,0], [4,0], [1,0], ], ]
synch_info = TestSynchInfo( synch_table )
# Instantiate and elaborate the model
model = TestHarness( 16, test_msgs1, test_msgs2, synch_info,
src_delay, sink_delay )
model.vcd_file = dump_vcd
model.elaborate()
# Create a simulator using the simulation tool
sim = SimulationTool( model )
# Run the simulation
print()
sim.reset()
while not model.done() and sim.ncycles < 1000:
sim.print_line_trace()
model.check()
sim.cycle()
assert model.done()
# Add a couple extra ticks so that the VCD dump is nicer
sim.cycle()
sim.cycle()
sim.cycle()
@pytest.mark.parametrize( 'src_delay,sink_delay', [
( 0, 0 ),
( 1, 1 ),
( 1, 0 ),
( 5, 1 ),
( 0, 1 ),
( 1, 5 ),
( 10, 10 ),
])
def test_TestSource( dump_vcd, src_delay, sink_delay ):
do_test( dump_vcd, src_delay, sink_delay )
|
基础教程/A2-神经网络基本原理/第5步 - 非线性分类/src/ch11-NonLinearMultipleClassification/Level1_BankClassifier.py | microsoft/ai-edu | 11,094 | 11122052 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
import matplotlib.pyplot as plt
from HelperClass2.NeuralNet_2_2 import *
from HelperClass2.Visualizer_1_1 import *
train_data_name = "../../Data/ch11.train.npz"
test_data_name = "../../Data/ch11.test.npz"
if __name__ == '__main__':
dataReader = DataReader_2_0(train_data_name, test_data_name)
dataReader.ReadData()
dataReader.NormalizeY(NetType.MultipleClassifier, base=1)
fig = plt.figure(figsize=(6,6))
DrawThreeCategoryPoints(dataReader.XTrainRaw[:,0], dataReader.XTrainRaw[:,1], dataReader.YTrain, "Source Data")
plt.show()
dataReader.NormalizeX()
dataReader.Shuffle()
dataReader.GenerateValidationSet()
n_input = dataReader.num_feature
n_hidden = 3
n_output = dataReader.num_category
eta, batch_size, max_epoch = 0.1, 10, 5000
eps = 0.1
hp = HyperParameters_2_0(n_input, n_hidden, n_output, eta, max_epoch, batch_size, eps, NetType.MultipleClassifier, InitialMethod.Xavier)
net = NeuralNet_2_2(hp, "Bank_233")
#net.LoadResult()
net.train(dataReader, 100, True)
net.ShowTrainingHistory()
fig = plt.figure(figsize=(6,6))
DrawThreeCategoryPoints(dataReader.XTrain[:,0], dataReader.XTrain[:,1], dataReader.YTrain, hp.toString())
ShowClassificationResult25D(net, 50, hp.toString())
plt.show()
|
tests/integration_tests/test_neuropod.py | dantreiman/ludwig | 7,739 | 11122053 | <reponame>dantreiman/ludwig
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import platform
import sys
from typing import List, Union
import numpy as np
import pandas as pd
import pytest
import torch
from ludwig.api import LudwigModel
from ludwig.constants import NAME, PREDICTIONS, TRAINER
from ludwig.utils.neuropod_utils import export_neuropod
from tests.integration_tests.utils import (
binary_feature,
category_feature,
generate_data,
LocalTestBackend,
number_feature,
)
@pytest.mark.skipif(platform.system() == "Windows", reason="Neuropod is not supported on Windows")
@pytest.mark.skipif(sys.version_info >= (3, 9), reason="Neuropod does not support Python 3.9")
def test_neuropod_torchscript(csv_filename, tmpdir):
data_csv_path = os.path.join(tmpdir, csv_filename)
# Configure features to be tested:
bin_str_feature = binary_feature()
input_features = [
bin_str_feature,
# binary_feature(),
number_feature(),
category_feature(vocab_size=3),
# TODO: future support
# sequence_feature(vocab_size=3),
# text_feature(vocab_size=3),
# vector_feature(),
# image_feature(image_dest_folder),
# audio_feature(audio_dest_folder),
# timeseries_feature(),
# date_feature(),
# h3_feature(),
# set_feature(vocab_size=3),
# bag_feature(vocab_size=3),
]
output_features = [
bin_str_feature,
# binary_feature(),
number_feature(),
category_feature(vocab_size=3),
# TODO: future support
# sequence_feature(vocab_size=3),
# text_feature(vocab_size=3),
# set_feature(vocab_size=3),
# vector_feature()
]
backend = LocalTestBackend()
config = {"input_features": input_features, "output_features": output_features, TRAINER: {"epochs": 2}}
# Generate training data
training_data_csv_path = generate_data(input_features, output_features, data_csv_path)
# Convert bool values to strings, e.g., {'Yes', 'No'}
df = pd.read_csv(training_data_csv_path)
false_value, true_value = "No", "Yes"
df[bin_str_feature[NAME]] = df[bin_str_feature[NAME]].map(lambda x: true_value if x else false_value)
df.to_csv(training_data_csv_path)
# Train Ludwig (Pythonic) model:
ludwig_model = LudwigModel(config, backend=backend)
ludwig_model.train(
dataset=training_data_csv_path,
skip_save_training_description=True,
skip_save_training_statistics=True,
skip_save_model=True,
skip_save_progress=True,
skip_save_log=True,
skip_save_processed_input=True,
)
# Obtain predictions from Python model
preds_dict, _ = ludwig_model.predict(dataset=training_data_csv_path, return_type=dict)
# Create graph inference model (Torchscript) from trained Ludwig model.
neuropod_path = os.path.join(tmpdir, "neuropod")
export_neuropod(ludwig_model, neuropod_path)
from neuropod.loader import load_neuropod
neuropod_module = load_neuropod(neuropod_path)
def to_input(s: pd.Series) -> Union[List[str], torch.Tensor]:
if s.dtype == "object":
return np.array(s.to_list())
return s.to_numpy().astype(np.float32)
df = pd.read_csv(training_data_csv_path)
inputs = {name: to_input(df[feature.column]) for name, feature in ludwig_model.model.input_features.items()}
outputs = neuropod_module.infer(inputs)
# Compare results from Python trained model against Neuropod
assert len(preds_dict) == len(outputs)
for feature_name, feature_outputs_expected in preds_dict.items():
assert feature_name in outputs
output_values_expected = feature_outputs_expected[PREDICTIONS]
output_values = outputs[feature_name]
if output_values.dtype.type in {np.string_, np.str_}:
# Strings should match exactly
assert np.all(output_values == output_values_expected), f"feature: {feature_name}, output: predictions"
else:
assert np.allclose(output_values, output_values_expected), f"feature: {feature_name}, output: predictions"
|
pyclue/tf1/tasks/sentence_pair/siamese/predict.py | CLUEbenchmark/PyCLUE | 122 | 11122069 | #!/usr/bin/python3
"""
@Author: <NAME>
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import numpy as np
import tensorflow as tf
from pyclue.tf1.open_sources.configs import pretrained_names, pretrained_types
from pyclue.tf1.open_sources.download import get_pretrained_model
from pyclue.tf1.tasks.sentence_pair.siamese.inputs import Processor
from pyclue.tf1.tokenizers.bert_tokenizer import FullTokenizer # Add more tokenizers
class Predictor(object):
def __init__(self, model_file):
self.model_file = os.path.abspath(model_file)
# label
label_map_reverse_file = os.path.join(
self.model_file, 'label_map_reverse.json')
with tf.gfile.GFile(label_map_reverse_file, 'r') as f:
self.label_map_reverse = json.load(f)
self.labels = [item[1] for item in sorted(
self.label_map_reverse.items(), key=lambda i: i[0])]
# model
model_config_file = os.path.join(
self.model_file, 'model_config.json')
with tf.gfile.GFile(model_config_file, 'r') as f:
self.model_config = json.load(f)
self.model_name = self.model_config.get('model_name') or None
self.model_type = self.model_config.get('model_type') or None
self.vocab_file = self.model_config.get('vocab_file') or None
self.max_seq_len = self.model_config.get('max_seq_len') or 512
if not self.model_name:
assert all([self.vocab_file, self.model_type]), \
'If not given model_name provided by open_sources, ' \
'you should specify the model_type and vocab_file.'
else:
assert self.model_name in pretrained_names, \
'%s not provided by open_sources' % self.model_name
self.model_type = pretrained_types.get(self.model_name).split('_')[0]
pretrained_dir = get_pretrained_model(pretrained_name=self.model_name)
self.vocab_file = os.path.join(pretrained_dir, 'vocab.txt')
# tokenizer
if self.model_type == 'bert':
self.tokenizer = FullTokenizer(self.vocab_file)
elif self.model_type == 'albert':
self.tokenizer = FullTokenizer(self.vocab_file)
else:
raise ValueError('model_type %s unknown.' % self.model_type)
# processor
self._load_processor()
# build graph
self._build()
def _load_processor(self):
self.processor = Processor(
max_seq_len=self.max_seq_len, tokenizer=self.tokenizer, labels=self.labels)
def _build(self):
self.graph = tf.Graph()
self.sess = tf.Session()
self.meta_graph_def = tf.saved_model.loader.load(
self.sess, tags=['serve'], export_dir=self.model_file)
self.signature = self.meta_graph_def.signature_def
self.input_ids_1 = self.signature['serving_default'].inputs['input_ids_1'].name
self.input_mask_1 = self.signature['serving_default'].inputs['input_mask_1'].name
self.segment_ids_1 = self.signature['serving_default'].inputs['segment_ids_1'].name
self.input_ids_2 = self.signature['serving_default'].inputs['input_ids_2'].name
self.input_mask_2 = self.signature['serving_default'].inputs['input_mask_2'].name
self.segment_ids_2 = self.signature['serving_default'].inputs['segment_ids_2'].name
self.label_ids = self.signature['serving_default'].inputs['label_ids'].name
self.text_a_embedding = self.signature['serving_default'].outputs['text_a_embedding'].name
self.text_b_embedding = self.signature['serving_default'].outputs['text_b_embedding'].name
self.cos_sims = self.signature['serving_default'].outputs['cos_sims'].name
self.predictions = self.signature['serving_default'].outputs['predictions'].name
self.probabilities = self.signature['serving_default'].outputs['probabilities'].name
def _predict_for_single_example(self, feature):
cos_sim, prediction, probability = self.sess.run(
[self.cos_sims, self.predictions, self.probabilities],
feed_dict={
self.input_ids_1: [feature.input_ids_1],
self.input_mask_1: [feature.input_mask_1],
self.segment_ids_1: [feature.segment_ids_1],
self.input_ids_2: [feature.input_ids_2],
self.input_mask_2: [feature.input_mask_2],
self.segment_ids_2: [feature.segment_ids_2],
self.label_ids: [feature.label_id]})
return cos_sim, prediction, probability
def predict(self, texts):
assert isinstance(texts, list), 'texts format should be `list`'
assert all([isinstance(item, list) for item in texts]), 'texts item format should be `list`'
new_texts = []
for item in texts:
if len(item) == 2 or len(item) == 3:
new_texts.append([self.labels[0], item[-2], item[-1]])
else:
raise ValueError('text item should contain 2 or 3 elements')
assert all([len(item) == 3 for item in new_texts]), \
'texts item should contain 3 elements'
features = self.processor.get_features_for_inputs(new_texts)
results = []
for text, feature in zip(new_texts, features):
cos_sim, prediction, probability = self._predict_for_single_example(feature)
results.append({
'text_a': text[1],
'text_b': text[2],
'cos_sim': np.squeeze(cos_sim).tolist() / 100,
'prediction': self.label_map_reverse[str(np.squeeze(prediction).tolist())],
'probability': np.squeeze(probability).tolist()})
return results
def predict_from_file(self, input_file):
texts = self.processor.read_file(input_file)
texts = np.squeeze(texts).tolist()
return self.predict(texts)
def quality_inspection(self, input_file, save_path):
texts = self.processor.read_file(input_file)
if np.array(texts).ndim == 1:
texts = [texts]
texts = [item for item in texts if len(item) == 3]
features = self.processor.get_features_for_inputs(texts)
cos_sims, predictions, probabilities = [], [], []
for feature in features:
cos_sim, prediction, probability = self._predict_for_single_example(feature)
cos_sims.append(cos_sim)
predictions.append(prediction)
probabilities.append(probability.tolist())
if not tf.gfile.Exists(save_path):
tf.gfile.MakeDirs(save_path)
with tf.gfile.GFile(os.path.join(save_path, input_file.split('/')[-1]), 'w') as writer:
            for text, prediction, probability, cos_sim in zip(texts, predictions, probabilities, cos_sims):
prediction = self.label_map_reverse[str(np.squeeze(prediction).tolist())]
if text[0] != prediction:
writer.write(
'text_a = %s, text_b = %s, '
'true = %s, pred = %s, '
'probability = %s, cos_sim = %s\n'
% (text[1], text[2], text[0], prediction, probability, cos_sim / 100))
def close(self):
self.sess.close()
def restart(self):
self._build()
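# Minimal usage sketch (not part of the original module). The export directory
# below is a placeholder: it must contain the exported SavedModel together with
# the label_map_reverse.json and model_config.json files that Predictor expects.
if __name__ == '__main__':
    predictor = Predictor(model_file='./exported_siamese_model')
    print(predictor.predict([['first sentence', 'second sentence']]))
    predictor.close()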
|
libs/tracker/gric.py | SimOgaard/DF-VO | 361 | 11122093 | ''''''
'''
@Author: <NAME> (<EMAIL>)
@Date: 2020-03-01
@Copyright: Copyright (C) <NAME> 2020. All rights reserved. Please refer to the license file.
@LastEditTime: 2020-05-27
@LastEditors: <NAME>
@Description: This file contains functions related to GRIC computation
'''
import numpy as np
def compute_fundamental_residual(F, kp1, kp2):
"""
Compute fundamental matrix residual
Args:
F (array, [3x3]): Fundamental matrix (from view-1 to view-2)
kp1 (array, [Nx2]): keypoint 1
kp2 (array, [Nx2]): keypoint 2
Returns:
res (array, [N]): residual
"""
# get homogeneous keypoints (3xN array)
m0 = np.ones((3, kp1.shape[0]))
m0[:2] = np.transpose(kp1, (1,0))
m1 = np.ones((3, kp2.shape[0]))
m1[:2] = np.transpose(kp2, (1,0))
Fm0 = F @ m0 #3xN
Ftm1 = F.T @ m1 #3xN
m1Fm0 = (np.transpose(Fm0, (1,0)) @ m1).diagonal()
res = m1Fm0**2 / (np.sum(Fm0[:2]**2, axis=0) + np.sum(Ftm1[:2]**2, axis=0))
return res
def compute_homography_residual(H_in, kp1, kp2):
"""
Compute homography matrix residual
Args:
H (array, [3x3]): homography matrix (Transformation from view-1 to view-2)
kp1 (array, [Nx2]): keypoint 1
kp2 (array, [Nx2]): keypoint 2
Returns:
res (array, [N]): residual
"""
n = kp1.shape[0]
H = H_in.flatten()
# get homogeneous keypoints (3xN array)
m0 = np.ones((3, kp1.shape[0]))
m0[:2] = np.transpose(kp1, (1,0))
m1 = np.ones((3, kp2.shape[0]))
m1[:2] = np.transpose(kp2, (1,0))
G0 = np.zeros((3, n))
G1 = np.zeros((3, n))
G0[0]= H[0] - m1[0] * H[6]
G0[1]= H[1] - m1[0] * H[7]
G0[2]=-m0[0] * H[6] - m0[1] * H[7] - H[8]
G1[0]= H[3] - m1[1] * H[6]
G1[1]= H[4] - m1[1] * H[7]
G1[2]=-m0[0] * H[6] - m0[1] * H[7] - H[8]
magG0=np.sqrt(G0[0]*G0[0] + G0[1]*G0[1] + G0[2]*G0[2])
magG1=np.sqrt(G1[0]*G1[0] + G1[1]*G1[1] + G1[2]*G1[2])
magG0G1=G0[0]*G1[0] + G0[1]*G1[1]
alpha=np.arccos(magG0G1 /(magG0*magG1))
alg = np.zeros((2, n))
alg[0]= m0[0]*H[0] + m0[1]*H[1] + H[2] - \
m1[0]*(m0[0]*H[6] + m0[1]*H[7] + H[8])
alg[1]= m0[0]*H[3] + m0[1]*H[4] + H[5] - \
m1[1]*(m0[0]*H[6] + m0[1]*H[7] + H[8])
D1=alg[0]/magG0
D2=alg[1]/magG1
res = (D1*D1 + D2*D2 - 2.0*D1*D2*np.cos(alpha))/np.sin(alpha)
return res
def calc_GRIC(res, sigma, n, model):
"""Calculate GRIC
Args:
res (array, [N]): residual
sigma (float): assumed variance of the error
n (int): number of residuals
model (str): model type
- FMat
- EMat
- HMat
"""
R = 4
sigmasq1 = 1./ sigma**2
K = {
"FMat": 7,
"EMat": 5,
"HMat": 8,
}[model]
D = {
"FMat": 3,
"EMat": 3,
"HMat": 2,
}[model]
lam3RD=2.0 * (R-D)
sum_ = 0
for i in range(n):
tmp=res[i] * sigmasq1
if tmp<=lam3RD:
sum_ += tmp
else:
sum_ += lam3RD
sum_ += n * D * np.log(R) + K * np.log(R*n)
return sum_
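# Minimal usage sketch (not part of the original module): random stand-in values
# are used for the matrices and keypoints, purely to show shapes and call order.
if __name__ == '__main__':
    kp1 = np.random.rand(200, 2) * 640
    kp2 = kp1 + np.random.randn(200, 2)
    F = np.random.rand(3, 3)
    H = np.random.rand(3, 3)
    res_F = compute_fundamental_residual(F, kp1, kp2)
    res_H = compute_homography_residual(H, kp1, kp2)
    print('GRIC(F) =', calc_GRIC(res_F, sigma=0.8, n=len(res_F), model='FMat'))
    print('GRIC(H) =', calc_GRIC(res_H, sigma=0.8, n=len(res_H), model='HMat'))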
|
platforms/tinyfpga_bx.py | auscompgeek/litex-buildenv | 198 | 11122124 | from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.lattice.programmer import TinyProgProgrammer
_io = [
("user_led", 0, Pins("B3"), IOStandard("LVCMOS33")),
("usb", 0,
Subsignal("d_p", Pins("B4")),
Subsignal("d_n", Pins("A4")),
Subsignal("pullup", Pins("A3")),
IOStandard("LVCMOS33")
),
("spiflash", 0,
Subsignal("cs_n", Pins("F7"), IOStandard("LVCMOS33")),
Subsignal("clk", Pins("G7"), IOStandard("LVCMOS33")),
Subsignal("mosi", Pins("G6"), IOStandard("LVCMOS33")),
Subsignal("miso", Pins("H7"), IOStandard("LVCMOS33")),
Subsignal("wp", Pins("H4"), IOStandard("LVCMOS33")),
Subsignal("hold", Pins("J8"), IOStandard("LVCMOS33"))
),
("spiflash4x", 0,
Subsignal("cs_n", Pins("F7"), IOStandard("LVCMOS33")),
Subsignal("clk", Pins("G7"), IOStandard("LVCMOS33")),
Subsignal("dq", Pins("G6 H7 H4 J8"), IOStandard("LVCMOS33"))
),
("clk16", 0, Pins("B2"), IOStandard("LVCMOS33"))
]
_connectors = [
# Putting the USB connector at top (similar to TinyFPGA BX documentation card).
# A2-H2, Pins 1-13, GPIO:0 --> GPIO:12 - Left side, starting at top going down.
# H9-A6, Pins 14-24, GPIO:13 --> GPIO:23 - Right side, starting at bottom going up.
("GPIO", "A2 A1 B1 C2 C1 D2 D1 E2 E1 G2 H1 J1 H2 H9 D9 D8 C9 A9 B8 A8 B7 A7 B6 A6"),
# G1-J2, Pins 25-31 EXTRA:0 --> EXTRA:6 - Pads on the bottom of the board.
("EXTRA", "G1 J3 J4 G9 J9 E8 J2")
]
class Platform(LatticePlatform):
name = "tinyfpga_bx"
default_clk_name = "clk16"
default_clk_period = 62.5
# TinyFPGA BX normally defines the user bitstream to begin at 0x28000
# and user data to begin at 0x50000; follow the convention here.
bootloader_size = 0x28000
gateware_size = 0x50000 - bootloader_size
# FIXME: Create a "spi flash module" object in the same way we have SDRAM
spiflash_model = "m25p16"
spiflash_read_dummy_bits = 8
spiflash_clock_div = 2
spiflash_total_size = int((8/8)*1024*1024) # 8Mbit
spiflash_page_size = 256
spiflash_sector_size = 0x10000
def __init__(self):
LatticePlatform.__init__(self, "ice40-lp8k-cm81", _io, _connectors,
toolchain="icestorm")
def create_programmer(self):
return TinyProgProgrammer()
|
cap/BlueFuzz/bluetooth_scanner.py | Charmve/BLE-Security-Att-Def | 149 | 11122131 | <gh_stars>100-1000
import bluetooth
import subprocess
import time
import os
from obd_generator import *
SCANNER_TIME = 3
# NOTE: should be run as root
def main():
try:
# switch off subprocesses output
devs = open(os.devnull,"w")
# make directory with root privileges to store pcap output file
# tshark output can be stored only in root's directories
subprocess.call("mkdir ./capture",shell=True,stdout=devs,stderr=devs)
#run tshark with root privileges on bluetooth interface
thread=subprocess.Popen(["tshark", "-w", "./capture/capture.pcap", "-i", "bluetooth0"],stdout=devs,stderr=devs)
#STEP 1: BLUETOOTH SCANNER
devices = bluetooth.discover_devices(lookup_names = True, flush_cache = True, duration = SCANNER_TIME)
if len(devices) == 0:
print ("No devices found")
thread.terminate()
quit()
i=0
dev_names = []
dev_addr = []
dev_services = []
# print services for each discovered device
for addr, name in devices:
#device_name = bluetooth.lookup_name(addr)
dev_addr.append(addr)
dev_names.append(name)
print "Device N." + str(i) + ": " + addr + ": " + name
services = []
j=0
for service in bluetooth.find_service(address = addr):
print " Service N: ", j
print " Name: ", service["name"]
print " Description: ", service["description"]
print " Protocol: ", service["protocol"]
print " Provider: ", service["provider"]
print " Port: ", service["port"]
print " Service id: ", service["service-id"]
print ""
services.append(service)
j=j+1
dev_services.append(services)
i=i+1
#STEP 2: DEVICE CHOOSING
try:
userInput=(raw_input('Chose a device number for pairing (q for quit):'))
if userInput == 'q':
thread.terminate()
quit()
deviceNum = int(userInput)
except ValueError:
print "Not a number"
thread.terminate()
quit()
if deviceNum >= len(devices):
print "Input error: no such device"
thread.terminate()
quit()
address = dev_addr[deviceNum]
name = dev_names[deviceNum]
print "You have chosen device " + str(deviceNum) + ": " + address + "(" + name + ")"
#STEP 3: CHOSE SERVICE
try:
serviceNum = int(raw_input('Chose the service number :')) # RFCOMM port
except ValueError:
print "Not a number"
thread.terminate()
quit()
chosen_services = dev_services[deviceNum]
if serviceNum >= len(chosen_services):
print "Input error: no such service"
thread.terminate()
quit()
chosen_service = chosen_services[serviceNum]
protocol = chosen_service["protocol"]
port = chosen_service["port"]
print "protocol: " + protocol
print "port: ", port
#STEP 4: PAIRING
try:
# bluetooth protocol for OBD-II interaction: RFCOMM
if protocol == "RFCOMM":
socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
elif protocol == "L2CAP":
socket = bluetooth.BluetoothSocket(bluetooth.L2CAP)
else:
print "Protocol not supported"
thread.terminate()
quit()
socket.connect((address,port))
print "Device connected"
# the first packet is equal to the first sent by the official application
socket.send("ATZ\r")
print "Sent: ATZ\r"
time.sleep(1)
# expected answer is "\r\rELM327 v1.5\r\r"
# the second packet is equal to the second sent by the official application
socket.send("ATD\r")
print "Sent: ATD\r"
time.sleep(1)
# expected answer is "\rOK\r\r"
while True:
# send pseudo-random generated data
data = generator()
socket.send(data)
print "Sent: ", data
time.sleep(1)
'''
#To receive data
received = socket.recv(1024) # Buffer size
print "received: ", received
'''
except bluetooth.btcommon.BluetoothError as err:
print err
socket.close()
thread.terminate()
quit()
except KeyboardInterrupt:
# to intercept CRTL+C interrupt
print "\nQuitting..."
thread.terminate()
quit()
if __name__ == "__main__":
main()
|
plugins/aws/test/test_config.py | someengineering/resoto | 126 | 11122148 | <filename>plugins/aws/test/test_config.py<gh_stars>100-1000
from resotolib.utils import num_default_threads
from resotolib.config import Config
from resoto_plugin_aws import AWSCollectorPlugin
def test_args():
config = Config("dummy", "dummy")
AWSCollectorPlugin.add_config(config)
Config.init_default_config()
assert Config.aws.access_key_id is None
assert Config.aws.secret_access_key is None
assert Config.aws.role is None
assert Config.aws.role_override is False
assert Config.aws.account is None
assert Config.aws.region is None
assert Config.aws.scrape_org is False
assert Config.aws.fork_process is True
assert Config.aws.scrape_exclude_account == []
assert Config.aws.assume_current is False
assert Config.aws.do_not_scrape_current is False
assert Config.aws.account_pool_size == num_default_threads()
assert Config.aws.region_pool_size == 20
assert len(Config.aws.collect) == 0
assert len(Config.aws.no_collect) == 0
|
code_sender/winauto.py | fredcallaway/SendCode | 177 | 11122154 | <reponame>fredcallaway/SendCode<gh_stars>100-1000
import ctypes
import time
import re
from ctypes import c_bool, c_uint, c_long, c_size_t, c_wchar
# most of them are derived from pywinauto
class MENUITEMINFOW(ctypes.Structure):
_fields_ = [
('cbSize', c_uint),
('fMask', c_uint),
('fType', c_uint),
('fState', c_uint),
('wID', c_uint),
('hSubMenu', c_size_t),
('hbmpChecked', c_size_t),
('hbmpUnchecked', c_size_t),
('dwItemData', c_size_t),
('dwTypeData', c_size_t),
('cch', c_uint),
('hbmpItem', c_size_t),
]
FindWindow = ctypes.windll.user32.FindWindowW
EnumWindowsProc = ctypes.CFUNCTYPE(c_bool, c_size_t, c_size_t)
EnumChildWindowsProc = ctypes.CFUNCTYPE(c_bool, c_size_t, c_size_t)
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
GetClassName = ctypes.windll.user32.GetClassNameW
BringWindowToTop = ctypes.windll.user32.BringWindowToTop
GetMenu = ctypes.windll.user32.GetMenu
GetMenuItemInfo = ctypes.windll.user32.GetMenuItemInfoW
EnumWindows = ctypes.windll.user32.EnumWindows
EnumChildWindows = ctypes.windll.user32.EnumChildWindows
PostMessage = ctypes.windll.user32.PostMessageA
keybd_event = ctypes.windll.user32.keybd_event
def get_menu_item_info(menu, index):
info = MENUITEMINFOW()
info.cbSize = ctypes.sizeof(info)
info.fMask = 31
ret = GetMenuItemInfo(menu, c_long(index), True, ctypes.byref(info))
if not ret:
raise Exception("menu item not found.")
return info
def get_menu_item_text(menu, index, info=None):
if not info:
info = get_menu_item_info(menu, index)
if info.cch:
buffer_size = info.cch + 1
text = ctypes.create_unicode_buffer(buffer_size)
info.dwTypeData = ctypes.addressof(text)
info.cch = buffer_size
GetMenuItemInfo(menu, c_long(index), True, ctypes.byref(info))
return text.value
else:
return ""
def get_window_text(hwnd):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
if buff.value:
return buff.value
else:
return ""
def get_class(hwnd):
className = (c_wchar * 257)()
GetClassName(hwnd, ctypes.byref(className), 256)
return className.value
def enum_windows(callback):
proc = EnumWindowsProc(callback)
EnumWindows(proc, 0)
def enum_child_windows(hwnd, callback):
proc = EnumChildWindowsProc(callback)
EnumChildWindows(hwnd, proc, 0)
def find_window(title=None, classname=None):
windows = []
def loop_over_windows(hwnd, _):
if windows or not IsWindowVisible(hwnd):
return True
if (not title or re.match(title, get_window_text(hwnd))) and \
(not classname or get_class(hwnd) == classname):
windows.append(hwnd)
return True
try:
enum_windows(loop_over_windows)
except Exception:
pass
if windows:
window = windows[0]
return window
def find_rgui():
rgui = find_window(r"R Console.*", "Rgui")
if not rgui:
rgui = find_window(classname="Rgui Workspace")
if not rgui:
raise Exception("window not found.")
return rgui
def bring_rgui_to_top(rid):
BringWindowToTop(rid)
if get_class(rid) == "Rgui Workspace":
def bring_child(hwnd, _):
if get_window_text(hwnd).startswith("R Console"):
BringWindowToTop(hwnd)
return True
try:
enum_child_windows(rid, bring_child)
except Exception:
pass
def paste_to_rgui(rid):
menu = GetMenu(rid)
if get_menu_item_text(menu, 0):
# non-fullscreen mdi mode
submenu = get_menu_item_info(menu, 1).hSubMenu
else:
# fullscreen mdi mode or sdi mode
submenu = get_menu_item_info(menu, 2).hSubMenu
pasteid = get_menu_item_info(submenu, 1).wID
PostMessage(rid, 7, pasteid, 0) # set forcues
time.sleep(0.01)
PostMessage(rid, 273, pasteid, 0) # click
time.sleep(0.01)
def find_rstudio():
rgui = find_window(r".*RStudio", "Qt5QWindowIcon")
if not rgui:
raise Exception("window not found.")
return rgui
def paste_to_rstudio(rid, from_view=True):
time.sleep(0.01)
if not from_view:
keybd_event(18, 0, 2, 0) # alt up
keybd_event(16, 0, 2, 0) # shift up
time.sleep(0.01)
keybd_event(17, 0, 0, 0) # ctrl down
time.sleep(0.01)
PostMessage(rid, 256, ord("V"), 0)
time.sleep(0.01)
if not from_view:
keybd_event(17, 0, 2, 0) # ctrl up
time.sleep(0.01)
PostMessage(rid, 7, 0, 0)
time.sleep(0.01)
PostMessage(rid, 256, 13, 0)
time.sleep(0.01)
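# Usage sketch (not part of the original module): Windows-only, and it assumes an
# Rgui window is currently open and that the clipboard already holds R code.
if __name__ == '__main__':
    rid = find_rgui()
    bring_rgui_to_top(rid)
    paste_to_rgui(rid)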
|
py/demo/app.py | swt2c/wave | 3,013 | 11122158 | <filename>py/demo/app.py
from h2o_wave import main, app, Q
from .dashboard_red import show_red_dashboard
from .dashboard_blue import show_blue_dashboard
from .dashboard_orange import show_orange_dashboard
from .dashboard_cyan import show_cyan_dashboard
from .dashboard_grey import show_grey_dashboard
from .dashboard_mint import show_mint_dashboard
from .dashboard_purple import show_purple_dashboard
@app('/')
async def serve(q: Q):
route = q.args['#']
q.page.drop()
if route == 'dashboards/red':
await show_red_dashboard(q)
elif route == 'dashboards/blue':
await show_blue_dashboard(q)
elif route == 'dashboards/orange':
await show_orange_dashboard(q)
elif route == 'dashboards/cyan':
await show_cyan_dashboard(q)
elif route == 'dashboards/grey':
await show_grey_dashboard(q)
elif route == 'dashboards/mint':
await show_mint_dashboard(q)
elif route == 'dashboards/purple':
await show_purple_dashboard(q)
else:
await show_red_dashboard(q)
|
test/speed.py | wazenmai/Python-WORLD | 113 | 11122166 | <filename>test/speed.py
# built-in imports
import timeit
# 3rd-party imports
import numpy as np
from scipy.io.wavfile import read as wavread
from scipy.io.wavfile import write
# local imports
from world import main
fs, x_int16 = wavread('test-mwm.wav')
x = x_int16 / (2 ** 15 - 1)
vocoder = main.World()
# profile
print(timeit.timeit("vocoder.encode(fs, x, f0_method='harvest')", globals=globals(), number=1))
|
src/pathpicker/state_files.py | houbie/PathPicker | 5,167 | 11122170 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import List
FPP_DIR = os.environ.get("FPP_DIR") or "~/.cache/fpp"
PICKLE_FILE = ".pickle"
SELECTION_PICKLE = ".selection.pickle"
OUTPUT_FILE = ".fpp.sh"
LOGGER_FILE = ".fpp.log"
def assert_dir_created() -> None:
path = os.path.expanduser(FPP_DIR)
if os.path.isdir(path):
return
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_pickle_file_path() -> str:
assert_dir_created()
return os.path.expanduser(os.path.join(FPP_DIR, PICKLE_FILE))
def get_selection_file_path() -> str:
assert_dir_created()
return os.path.expanduser(os.path.join(FPP_DIR, SELECTION_PICKLE))
def get_script_output_file_path() -> str:
assert_dir_created()
return os.path.expanduser(os.path.join(FPP_DIR, OUTPUT_FILE))
def get_logger_file_path() -> str:
assert_dir_created()
return os.path.expanduser(os.path.join(FPP_DIR, LOGGER_FILE))
def get_all_state_files() -> List[str]:
# keep this update to date! We do not include
# the script output path since that gets cleaned automatically
return [
get_pickle_file_path(),
get_selection_file_path(),
get_logger_file_path(),
get_script_output_file_path(),
]
|
how-to-use-azureml/reinforcement-learning/multiagent-particle-envs/files/util.py | lobrien/MachineLearningNotebooks | 3,074 | 11122216 | <filename>how-to-use-azureml/reinforcement-learning/multiagent-particle-envs/files/util.py
import argparse
import os
import re
from rllib_multiagent_particle_env import CUSTOM_SCENARIOS
def parse_args():
parser = argparse.ArgumentParser('MADDPG with OpenAI MPE')
# Environment
parser.add_argument('--scenario', type=str, default='simple',
choices=['simple', 'simple_speaker_listener',
'simple_crypto', 'simple_push',
'simple_tag', 'simple_spread', 'simple_adversary'
] + CUSTOM_SCENARIOS,
help='name of the scenario script')
parser.add_argument('--max-episode-len', type=int, default=25,
help='maximum episode length')
parser.add_argument('--num-episodes', type=int, default=60000,
help='number of episodes')
parser.add_argument('--num-adversaries', type=int, default=0,
help='number of adversaries')
parser.add_argument('--good-policy', type=str, default='maddpg',
help='policy for good agents')
parser.add_argument('--adv-policy', type=str, default='maddpg',
help='policy of adversaries')
# Core training parameters
parser.add_argument('--lr', type=float, default=1e-2,
help='learning rate for Adam optimizer')
parser.add_argument('--gamma', type=float, default=0.95,
help='discount factor')
# NOTE: 1 iteration = sample_batch_size * num_workers timesteps * num_envs_per_worker
parser.add_argument('--sample-batch-size', type=int, default=25,
help='number of data points sampled /update /worker')
parser.add_argument('--train-batch-size', type=int, default=1024,
help='number of data points /update')
parser.add_argument('--n-step', type=int, default=1,
help='length of multistep value backup')
parser.add_argument('--num-units', type=int, default=64,
help='number of units in the mlp')
parser.add_argument('--final-reward', type=int, default=-400,
help='final reward after which to stop training')
# Checkpoint
parser.add_argument('--checkpoint-freq', type=int, default=200,
help='save model once every time this many iterations are completed')
parser.add_argument('--local-dir', type=str, default='./logs',
help='path to save checkpoints')
parser.add_argument('--restore', type=str, default=None,
help='directory in which training state and model are loaded')
# Parallelism
parser.add_argument('--num-workers', type=int, default=1)
parser.add_argument('--num-envs-per-worker', type=int, default=4)
parser.add_argument('--num-gpus', type=int, default=0)
return parser.parse_args()
def find_final_checkpoint(start_dir):
def find(pattern, path):
result = []
for root, _, files in os.walk(path):
for name in files:
if pattern.match(name):
result.append(os.path.join(root, name))
return result
cp_pattern = re.compile('.*checkpoint-\\d+$')
checkpoint_files = find(cp_pattern, start_dir)
checkpoint_numbers = []
for file in checkpoint_files:
checkpoint_numbers.append(int(file.split('-')[-1]))
final_checkpoint_number = max(checkpoint_numbers)
return next(
checkpoint_file for checkpoint_file in checkpoint_files
if checkpoint_file.endswith(str(final_checkpoint_number)))
|
slidedeck/create.py | SunPowered/slidedeck | 187 | 11122227 | <gh_stars>100-1000
"""Code to create a template project
"""
import os
import shutil
TEMPLATE_VARIABLE = 'SLIDEDECK_TEMPLATE'
def curdir(directory):
return os.path.abspath(os.path.join(os.path.dirname(__file__), directory))
def check_env():
'''
Check the current user's environment to return important settings
'''
sd_template = os.environ.get(TEMPLATE_VARIABLE, None) or curdir('data')
return {'template_dir': sd_template}
def create_project(directory, template=None):
"""
Create a project and copy the template files into it.
"""
if os.path.exists(directory):
raise OSError("Directory '%s' already exists" % directory)
settings = check_env()
template = template or settings.get('template_dir', None)
if not os.path.exists(template):
raise OSError("Template directory '%s' does not exist" % template)
def callback(src, names):
base = os.path.relpath(src, template)
for name in names:
print("\033[92mcreate\033[0m {:s}".format(os.path.join(directory, base, name)))
return []
shutil.copytree(template, directory, ignore=callback)
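# Usage sketch (not part of the original module); 'my_talk' is a placeholder
# target directory and must not exist yet.
if __name__ == '__main__':
    create_project('my_talk')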
|
web-scraping/pdf-url-extractor/pdf_link_extractor_regex.py | caesarcc/python-code-tutorials | 1,059 | 11122233 | <filename>web-scraping/pdf-url-extractor/pdf_link_extractor_regex.py
import fitz # pip install PyMuPDF
import re
# a regular expression of URLs
url_regex = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
# extract raw text from pdf
# file = "1710.05006.pdf"
file = "1810.04805.pdf"
# open the PDF file
with fitz.open(file) as pdf:
text = ""
for page in pdf:
# extract text of each PDF page
text += page.getText()
urls = []
# extract all urls using the regular expression
for match in re.finditer(url_regex, text):
url = match.group()
print("[+] URL Found:", url)
urls.append(url)
print("[*] Total URLs extracted:", len(urls))
|
cort/test/core/test_external_data.py | leonardoboliveira/cort | 141 | 11122242 | from cort.core.external_data import GenderData
__author__ = 'smartschat'
import unittest
class TestGenderData(unittest.TestCase):
def setUp(self):
self.gender_data = GenderData.get_instance()
def test_look_up(self):
self.assertEqual("NEUTRAL",
self.gender_data.look_up({"tokens": ["snafu"]}))
self.assertEqual("FEMALE",
self.gender_data.look_up(
{"tokens": ["Barbara", "Bush"],
"head": ["Barbara", "Bush"]}))
self.assertEqual("MALE",
self.gender_data.look_up({
"tokens": ["Footballer", "Zidane"],
"head": ["Zidane"]}))
if __name__ == '__main__':
unittest.main()
|
securityheaders/models/xxssprotection/__init__.py | th3cyb3rc0p/securityheaders | 151 | 11122266 | <reponame>th3cyb3rc0p/securityheaders
from .xxssprotectiondirective import XXSSProtectionDirective
from .xxssprotectionkeyword import XXSSProtectionKeyword
from .xxssprotection import XXSSProtection
__all__ = ['XXSSProtectionDirective', 'XXSSProtectionKeyword','XXSSProtection']
|
tests/integration_tests_plugins/version_aware_v2/setup.py | ilan-WS/cloudify-manager | 124 | 11122308 |
from setuptools import setup
setup(
name='version_aware',
version='2.0',
packages=['version_aware'],
)
|
examples/basics/subscribe.py | muhammadvellani/Adafruit_IO_Python | 136 | 11122318 | """
'subscribe.py'
==========================
Subscribes to an Adafruit IO Feed
Author(s): <NAME>, <NAME> for Adafruit Industries
"""
# Import standard python modules.
import sys
# This example uses the MQTTClient instead of the REST client
from Adafruit_IO import MQTTClient
# Set to your Adafruit IO key.
# Remember, your key is a secret,
# so make sure not to publish it when you publish this code!
ADAFRUIT_IO_KEY = 'YOUR_AIO_KEY'
# Set to your Adafruit IO username.
# (go to https://accounts.adafruit.com to find your username)
ADAFRUIT_IO_USERNAME = 'YOUR_AIO_USERNAME'
# Set to the ID of the feed to subscribe to for updates.
FEED_ID = 'counter'
# Define callback functions which will be called when certain events happen.
def connected(client):
"""Connected function will be called when the client is connected to
Adafruit IO.This is a good place to subscribe to feed changes. The client
parameter passed to this function is the Adafruit IO MQTT client so you
can make calls against it easily.
"""
# Subscribe to changes on a feed named Counter.
print('Subscribing to Feed {0}'.format(FEED_ID))
client.subscribe(FEED_ID)
print('Waiting for feed data...')
def disconnected(client):
"""Disconnected function will be called when the client disconnects."""
sys.exit(1)
def message(client, feed_id, payload):
"""Message function will be called when a subscribed feed has a new value.
The feed_id parameter identifies the feed, and the payload parameter has
the new value.
"""
print('Feed {0} received new value: {1}'.format(feed_id, payload))
# Create an MQTT client instance.
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
# Setup the callback functions defined above.
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
# Connect to the Adafruit IO server.
client.connect()
# Start a blocking message loop that waits for messages from Adafruit IO.
# (Use client.loop_background() instead if your program needs to keep doing
# other work while listening.)
client.loop_blocking()
|
utils/loss/hnm_loss.py | ZHANGHeng19931123/MutualGuide | 124 | 11122338 | <gh_stars>100-1000
#!/usr/bin/python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
class HNMLoss(nn.Module):
def __init__(self, ratio=3.0, loss_weight=1.0):
super(HNMLoss, self).__init__()
self.ratio = ratio
self.loss_weight = loss_weight
    def forward(self, pred, target, mask, reduction='mean'):
        # Keep only the entries selected by `mask` (e.g. anchors with valid annotations).
        pred, target = pred[mask], target[mask]
        with torch.no_grad():
            num_pos = target.sum().item()
            # Rank negatives by predicted confidence; positives get a fixed high
            # score (2.0) so the top-k selection below always keeps them.
            pt = pred.sigmoid() * (1 - target) + 2.0 * target
            # Hard negative mining: keep all positives plus `ratio` times as many
            # of the hardest negatives.
            mask = torch.topk(pt, int((1+self.ratio)*num_pos))[1]
        loss = F.binary_cross_entropy_with_logits(pred[mask], target[mask], reduction='none')
        if reduction == 'sum':
            return loss.sum()
        elif reduction == 'mean':
            # Normalize by the number of positives, as is common for detection losses.
            return loss.sum() / num_pos
        else:
            return loss
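# Usage sketch (not part of the original module): random logits/targets stand in
# for real detector outputs, just to show the expected shapes.
if __name__ == '__main__':
    criterion = HNMLoss(ratio=3.0)
    pred = torch.randn(2, 100)                      # per-anchor classification logits
    target = (torch.rand(2, 100) > 0.9).float()     # sparse positive labels
    valid = torch.ones(2, 100, dtype=torch.bool)    # keep every anchor
    print(criterion(pred, target, valid, reduction='mean'))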
|
fna_det/configs/fna_ssdlite_retrain.py | BaiYuYuan/FNA | 173 | 11122340 | # model settings
input_size = 300
model = dict(
type='SingleStageDetector',
pretrained=dict(
use_load=True,
load_path='./seed_mbv2.pt',
seed_num_layers=[1, 1, 2, 3, 4, 3, 3, 1, 1] # mbv2
),
backbone=dict(
type='FNA_SSDLite',
input_size=input_size,
net_config="""[[32, 16], ['k3_e1'], 1]|
[[16, 24], ['k5_e6', 'skip', 'skip', 'skip'], 2]|
[[24, 32], ['k5_e6', 'k5_e6', 'k3_e6', 'k5_e6'], 2]|
[[32, 64], ['k5_e6', 'k7_e6', 'k3_e6', 'skip'], 2]|
[[64, 96], ['k7_e6', 'k7_e6', 'k7_e6', 'k7_e6'], 1]|
[[96, 160], ['k5_e6', 'k7_e6', 'k7_e6', 'skip'], 2]|
[[160, 320], ['k7_e6'], 1]""",
out_feature_indices=(6, 8),
# l2_norm_scale=20,
),
neck=None,
bbox_head=dict(
type='SSDLightHead',
input_size=input_size,
in_channels=(576, 1280, 512, 256, 256, 128),
num_classes=81,
anchor_strides=(16, 32, 64, 107, 160, 320),
basesize_ratio_range=(0.2, 0.95),
anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.2, 0.2)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)
test_cfg = dict(
# nms_pre=1000,
min_bbox_size=0,
score_thr=0.02,
nms=dict(type='nms', iou_thr=0.6),
max_per_img=200)
# dataset settings
dataset_type = 'CocoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
data = dict(
imgs_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file= 'annotations/instances_train2017.json',
img_prefix= 'train2017/',
img_scale=(320, 320),
img_norm_cfg=img_norm_cfg,
size_divisor=None,
flip_ratio=0.5,
with_mask=False,
with_crowd=False,
with_label=True,
test_mode=False,
extra_aug=dict(
photo_metric_distortion=dict(
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
expand=dict(
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
random_crop=dict(
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
resize_keep_ratio=False)),
val=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
img_prefix='val2017/',
img_scale=(320, 320),
img_norm_cfg=img_norm_cfg,
size_divisor=None,
flip_ratio=0,
with_mask=False,
# with_crowd=False,
with_label=False,
test_mode=True,
resize_keep_ratio=False),
test=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
img_prefix='val2017/',
img_scale=(320, 320),
img_norm_cfg=img_norm_cfg,
size_divisor=None,
flip_ratio=0,
with_mask=False,
# with_crowd=False,
with_label=False,
test_mode=True,
resize_keep_ratio=False))
# optimizer
optimizer = dict(type='RMSprop', lr=0.2, eps=1.0, weight_decay=0.00004, momentum=0.9)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[36, 50, 56])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=200,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 60
use_syncbn = True
image_size_madds = (320, 320)
# device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ssd300_coco'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
training/criterion.py | HappyBelief/ContraD | 168 | 11122343 | <reponame>HappyBelief/ContraD
import torch
import torch.nn as nn
import torch.nn.functional as F
from third_party.gather_layer import GatherLayer
def target_nll_loss(inputs, targets, reduction='none'):
inputs_t = -F.nll_loss(inputs, targets, reduction='none')
logit_diff = inputs - inputs_t.view(-1, 1)
logit_diff = logit_diff.scatter(1, targets.view(-1, 1), -1e8)
diff_max = logit_diff.max(1)[0]
if reduction == 'sum':
return diff_max.sum()
elif reduction == 'mean':
return diff_max.mean()
elif reduction == 'none':
return diff_max
else:
raise NotImplementedError()
def nt_xent(out1, out2, temperature=0.1, distributed=False, normalize=False):
"""Compute NT_xent loss"""
assert out1.size(0) == out2.size(0)
if normalize:
out1 = F.normalize(out1)
out2 = F.normalize(out2)
if distributed:
out1 = torch.cat(GatherLayer.apply(out1), dim=0)
out2 = torch.cat(GatherLayer.apply(out2), dim=0)
N = out1.size(0)
_out = [out1, out2]
outputs = torch.cat(_out, dim=0)
sim_matrix = outputs @ outputs.t()
sim_matrix = sim_matrix / temperature
sim_matrix.fill_diagonal_(-5e4)
sim_matrix = F.log_softmax(sim_matrix, dim=1)
loss = -torch.sum(sim_matrix[:N, N:].diag() + sim_matrix[N:, :N].diag()) / (2*N)
return loss
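# Usage sketch (not part of the original module): two augmented "views" of the
# same batch are represented here by random feature vectors.
if __name__ == '__main__':
    feats1 = torch.randn(8, 128)
    feats2 = torch.randn(8, 128)
    print(nt_xent(feats1, feats2, temperature=0.1, normalize=True))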
|
scenic/projects/vivit/train_utils.py | techthiyanes/scenic | 688 | 11122402 | """Training Utilities for ViViT."""
import functools
from typing import Callable, Dict, List, Optional, Tuple, Union
from absl import logging
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental.optimizers import clip_grads
import jax.numpy as jnp
import jax.profiler
import matplotlib.pyplot as plt
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.model_lib.base_models import model_utils
from scenic.train_lib import optimizers
from scenic.train_lib import train_utils
import seaborn as sns
# Aliases for custom types:
Array = Union[jnp.ndarray, np.ndarray]
Batch = Dict[str, jnp.ndarray]
MetricFn = Callable[[jnp.ndarray, Dict[str, jnp.ndarray]],
Dict[str, Tuple[float, int]]]
LossFn = Callable[[jnp.ndarray, Batch, Optional[jnp.ndarray]], float]
def to_cpu(array: jnp.ndarray):
"""Transfers array (replicated on multiple hosts) to a single host.
Args:
array: Replicated array of shape
[num_hosts, num_devices, local_batch_size, ...]
Returns:
array of shape [global_batch_size, ...] where
global_batch_size = num_devices * local_batch_size
"""
return jax.device_get(dataset_utils.unshard(jax_utils.unreplicate(array)))
def train_step(
train_state: train_utils.TrainState,
batch: Batch,
*,
flax_model: nn.Module,
learning_rate_fn: Callable[[int], float],
loss_fn: LossFn,
metrics_fn: MetricFn,
config: ml_collections.ConfigDict,
debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
"""Runs a single step of training.
Given the state of the training and a batch of data, computes
the loss and updates the parameters of the model.
Note that in this code, the buffers of the first (train_state) and second
(batch) arguments are donated to the computation.
Args:
train_state: The state of training including the current
global_step, model_state, rng, and optimizer. The buffer of this argument
can be donated to the computation.
batch: A single batch of data. The buffer of this argument can be donated to
the computation.
flax_model: A Flax model.
learning_rate_fn: learning rate scheduler which give the global_step
generates the learning rate.
loss_fn: A loss function that given logits, a batch, and parameters of the
model calculates the loss.
metrics_fn: A metrics function that given logits and batch of data,
calculates the metrics as well as the loss.
config: Configuration of the experiment.
debug: Whether the debug mode is enabled during training. `debug=True`
enables model specific logging/storing some values using
jax.host_callback.
Returns:
Updated state of training, computed metrics, and learning rate for logging.
"""
new_rng, rng = jax.random.split(train_state.rng)
if config.get('mixup') and config.mixup.alpha:
mixup_rng, rng = jax.random.split(rng, 2)
mixup_rng = train_utils.bind_rng_to_host_device(
mixup_rng,
axis_name='batch',
bind_to=config.mixup.get('bind_to', 'device'))
batch = dataset_utils.mixup(
batch,
config.mixup.alpha,
config.mixup.get('image_format', 'NTHWC'),
rng=mixup_rng)
# Bind the rng to the host/device we are on for dropout.
dropout_rng = train_utils.bind_rng_to_host_device(
rng, axis_name='batch', bind_to='device')
def training_loss_fn(params):
variables = {'params': params, **train_state.model_state}
logits, new_model_state = flax_model.apply(
variables,
batch['inputs'],
mutable=['batch_stats'],
train=True,
rngs={'dropout': dropout_rng},
debug=debug)
loss = loss_fn(logits, batch, variables['params'])
return loss, (new_model_state, logits)
compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
step = train_state.global_step
lr = learning_rate_fn(step)
if config.get('sam_rho', None) is None:
# Normal training
(train_cost,
(new_model_state,
logits)), grad = compute_gradient_fn(train_state.optimizer.target)
else:
# SAM training, taken from cl/373487774
def dual_vector(y: jnp.ndarray) -> jnp.ndarray:
"""Returns the solution of max_x y^T x s.t. ||x||_2 <= 1."""
gradient_norm = jnp.sqrt(sum(
[jnp.sum(jnp.square(e)) for e in jax.tree_util.tree_leaves(y)]))
normalized_gradient = jax.tree_map(
lambda x: x / (gradient_norm + 1e-7), y)
return normalized_gradient
g_sam, _ = jax.grad(training_loss_fn, has_aux=True)(
train_state.optimizer.target)
g_sam = dual_vector(g_sam)
target_sam = jax.tree_multimap(lambda a, b: a + config.get('sam_rho') * b,
train_state.optimizer.target, g_sam)
(train_cost,
(new_model_state,
logits)), grad = compute_gradient_fn(target_sam)
# TODO(dehghani,aarnab): Check how to move this after the pmeam.
if config.get('max_grad_norm', None) is not None:
grad = clip_grads(grad, config.max_grad_norm)
del train_cost
# Re-use same axis_name as in the call to `pmap(...train_step...)` below.
grad = jax.lax.pmean(grad, axis_name='batch')
new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
# Explicit weight decay, if necessary.
if config.get('explicit_weight_decay', None) is not None:
new_optimizer = new_optimizer.replace(
target=optimizers.tree_map_with_names(
functools.partial(
optimizers.decay_weight_fn,
lr=lr,
decay=config.explicit_weight_decay),
new_optimizer.target,
match_name_fn=lambda name: 'kernel' in name))
metrics = metrics_fn(logits, batch)
new_train_state = train_state.replace( # pytype: disable=attribute-error
global_step=step + 1,
optimizer=new_optimizer,
model_state=new_model_state,
rng=new_rng)
return new_train_state, metrics, lr
def eval_step(
train_state: train_utils.TrainState,
batch: Batch,
*,
flax_model: nn.Module,
metrics_fn: MetricFn,
return_logits_and_labels: bool = False,
return_confusion_matrix: bool = False,
debug: Optional[bool] = False
) -> Union[Tuple[Dict[str, Tuple[float, int]], jnp.ndarray, jnp.array],
Tuple[Dict[str, Tuple[float, int]], jnp.ndarray],
Dict[str, Tuple[float, int]]]:
"""Runs a single step of training.
Note that in this code, the buffer of the second argument (batch) is donated
to the computation.
Assumed API of metrics_fn is:
```metrics = metrics_fn(logits, batch)
where batch is yielded by the batch iterator, and metrics is a dictionary
mapping metric name to a vector of per example measurements. eval_step will
aggregate (by summing) all per example measurements and divide by the
aggregated normalizers. For each given metric we compute:
1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
over all batches.
Args:
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer. The buffer of this argument
can be donated to the computation.
batch: A single batch of data. a metrics function, that given logits and
batch of data, calculates the metrics as well as the loss.
flax_model: A Flax model.
metrics_fn: A metrics function, that given logits and batch of data,
calculates the metrics as well as the loss.
return_logits_and_labels: If true, returns logits and labels. Can be used
for calculating the Mean Average Precision for multi-label problems.
Only one of "return_logits_and_labels" and "return_confusion_matrix"
should be true, with the latter taking precedence if both are set as true.
return_confusion_matrix: If true, returns confusion matrix. Can be used
to calculate additional metrics for k-way classification problems.
debug: Whether the debug mode is enabled during evaluation.
`debug=True` enables model specific logging/storing some values using
jax.host_callback.
Returns:
Calculated metrics [and optionally logits or confusion matrix].
"""
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
logits = flax_model.apply(
variables, batch['inputs'], train=False, mutable=False, debug=debug)
metrics = metrics_fn(logits, batch)
if return_confusion_matrix:
confusion_matrix = get_confusion_matrix(
labels=batch['label'], logits=logits, batch_mask=batch['batch_mask'])
confusion_matrix = jax.lax.all_gather(confusion_matrix, 'batch')
return metrics, confusion_matrix
if return_logits_and_labels:
logits = jax.lax.all_gather(logits, 'batch')
labels = jax.lax.all_gather(batch['label'], 'batch')
return metrics, logits, labels
return metrics
def test_step(
train_state: train_utils.TrainState,
batch: Batch,
*,
flax_model: nn.Module,
metrics_fn: MetricFn,
n_clips: int = 2,
return_logits_and_labels: bool = False,
softmax_logits: bool = False,
debug: bool = False
) -> Union[Dict[str, Tuple[float, int]], Tuple[Dict[str, Tuple[float, int]],
jnp.array, jnp.array]]:
"""Runs a single step of testing.
For multi-crop testing, we assume that num_crops consecutive entries in the
batch are from the same example. And we average the logits over these examples
We assume that the batch contains different crops of the same original
example. Therefore, we can average all the logits of it.
This assumption is true when local_batch_size = num_local_devices
Args:
train_state: The state of training including the current
global_step, model_state, rng, and optimizer, and other metadata.
batch: Dictionary with keys 'inputs', 'labels', 'batch_mask'. We assume that
all the inputs correspond to the same original example in the test set.
The input shapes to this function are batch['inputs'] = [num_crops, t, h,
w, c] batch['labels'] = [num_crops, num_classes] However, for
classification, the labels for all the crops are the same.
batch['batch_mask'] = [num_crops]
flax_model: A Flax model.
metrics_fn: Metrics function for the model.
n_clips: The number of clips to process at a time by each device. Set
due to memory constraints.
return_logits_and_labels: Whether return logits of the model or not.
softmax_logits: Whether to softmax-normalise the logits before
averaging
debug: Whether the debug mode is enabled during evaluation.
`debug=True` enables model specific logging/storing some values using
jax.host_callback.
Returns:
Calculated metrics [and optionally averaged logits that are of
shape `[1, num_classes]`].
"""
all_logits = jnp.zeros(batch['label'].shape[1])
assert len(batch['batch_mask'].shape) == 1, (
'Spatial padding is not supported in multi-crop evaluation.')
num_crops = batch['inputs'].shape[0]
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
for idx in range(0, num_crops, n_clips):
temp_input = batch['inputs'][idx:idx + n_clips]
logits = flax_model.apply(
variables, temp_input, train=False, mutable=False, debug=debug)
if softmax_logits:
logits = nn.softmax(logits, axis=-1)
logits = jnp.sum(logits, axis=0)
all_logits = all_logits + logits
all_logits = all_logits / num_crops
all_logits = jnp.expand_dims(all_logits, axis=0)
batch['label'] = jnp.expand_dims(batch['label'][0], axis=0)
batch['batch_mask'] = jnp.expand_dims(batch['batch_mask'][0], axis=0)
metrics = metrics_fn(all_logits, batch)
if return_logits_and_labels:
return metrics, all_logits, batch['label']
return metrics
def get_confusion_matrix(labels: Array, logits: Array,
batch_mask: Array) -> Array:
"""Computes confusion matrix from predictions.
Args:
labels: [n_batch] or [n_batch, n_classes] array. In the latter case, labels
are assumed to be one-hot, since the confusion matrix is only defined when
each example has one label.
logits: [n_batch, n_classes] array, which are the predictions of the model.
batch_mask: [n_batch] array. Entries should be 1 or 0, and indicate if the
example is valid or not.
Returns:
confusion_matrix of shape [1, n_classes, n_classes]
"""
if labels.ndim == logits.ndim: # one-hot targets
y_true = jnp.argmax(labels, axis=-1)
else:
y_true = labels
y_pred = jnp.argmax(logits, axis=-1)
# Prepare sample weights for confusion matrix:
weights = batch_mask.astype(jnp.float32)
confusion_matrix = model_utils.confusion_matrix(
y_true=y_true,
y_pred=y_pred,
num_classes=logits.shape[-1],
weights=weights)
confusion_matrix = confusion_matrix[jnp.newaxis, ...] # Dummy batch dim.
return confusion_matrix
def render_confusion_matrices(confusion_matrices: List[Array],
normalization_method: str = 'cols',
figsize: Tuple[int, int] = (12, 12),
dpi: int = 100,
font_scale: int = 3) -> Array:
"""Render confusion matrix so that it can be logged to Tensorboard.
Args:
confusion_matrices: List of [n_batch, n_class, n_class] confusion matrices.
The first two dimensions will be summed over to get an [n_class, n_class]
matrix for rendering.
normalization_method: Method of normalizing the confusion matrix before
plotting. Supported values are one of "cols", "rows" and "none".
If any other value, no normalization is performed.
figsize: The figure size used by matplotlib and seaborn.
dpi: The dpi used by matplotlib and seaborn.
font_scale: The font scale used by seaborn.
Returns:
image: Rendered image of the confusion matrix for plotting. Data type is
uint8 and values are in range [0, 255]. Shape is
[1, figsize * dpi, figsize * dpi, 3]
"""
conf_matrix = np.sum(confusion_matrices, axis=0) # Sum over eval batches.
if conf_matrix.ndim != 3:
raise AssertionError(
'Expecting confusion matrix to have shape '
f'[batch_size, num_classes, num_classes], got {conf_matrix.shape}.')
conf_matrix = np.sum(conf_matrix, axis=0) # Sum over batch dimension.
if normalization_method not in {'rows', 'cols', 'none'}:
logging.warning('Normalizer must be one of {rows, cols, none}.'
'Defaulting to none.')
sns.set(font_scale=font_scale)
fig = plt.figure(figsize=figsize, dpi=dpi)
# Normalize entries of the confusion matrix.
if normalization_method == 'rows':
normalizer = conf_matrix.sum(axis=1)[:, np.newaxis]
elif normalization_method == 'cols':
normalizer = conf_matrix.sum(axis=0)[np.newaxis, :]
else:
normalizer = 1
normalized_matrix = np.nan_to_num(conf_matrix / normalizer)
if np.sum(normalized_matrix) > 0:
sns.heatmap(
normalized_matrix,
annot=True,
linewidths=0.5,
square=True,
cbar=False,
cmap='jet',
annot_kws={'size': 18})
fig.tight_layout(pad=0.0)
fig.canvas.draw()
ncols, nrows = fig.canvas.get_width_height()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)
image = image.reshape(nrows, ncols, 3)
return np.expand_dims(image, axis=0)
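# Smoke-test sketch (not part of the original module): random integer counts
# stand in for confusion matrices gathered during evaluation.
if __name__ == '__main__':
    fake_batches = [np.random.randint(0, 20, size=(1, 5, 5)) for _ in range(4)]
    image = render_confusion_matrices(fake_batches, normalization_method='rows')
    print(image.shape)  # (1, figsize * dpi, figsize * dpi, 3)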
|
Arduino/speedTest.py | yuliya-sm7/EvoArm | 110 | 11122414 | import serial
import sys
import threading
import binascii
import time
ser = serial.Serial('COM3', 250000)
def checksum(bytes):
sum = 0
for b in bytes:
sum += ord(b)
return chr((~sum) & 0xFF)
def genPacket(data):
try:
bytes = binascii.unhexlify(data)
num = len(bytes)+3
bytes = chr(0) + chr(num) + '\xFF\xFF' + bytes + checksum(bytes)
return bytes
except:
print('Bad input {0}'.format(data))
return None
def send():
packet = genPacket('020201')
ser.write(packet)
# get response
#print('Response >> {0}'.format(binascii.hexlify(receive())))
def receive():
while ser.in_waiting < 1:
pass
return ser.read_all()
time.sleep(2)
timer = time.clock()
for i in xrange(100):
send()
receive()
print('{0:.3f}s'.format(time.clock()-timer))
|
observations/r/australian_elections.py | hajime9652/observations | 199 | 11122422 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def australian_elections(path):
"""elections to Australian House of Representatives, 1949-2007
Aggregate data on the 24 elections to Australia's House of
Representatives, 1949 to 2007.
A data frame with the following variables:
`date`
date of election, stored using the `Date` class
`Seats`
numeric, number of seats in the House of Representatives
`Uncontested`
numeric, number of uncontested seats
`ALPSeats`
numeric, number of seats won by the Australian Labor Party
`LPSeats`
numeric, number of seats won by the Liberal Party
`NPSeats`
numeric, number of seats won by the National Party (previously known
as the Country Party)
`OtherSeats`
numeric, number of seats won by other parties and/or independent
candidates
`ALP`
numeric, percentage of first preference votes cast for Australian
Labor Party candidates
`ALP2PP`
numeric, percentage of the two-party preferred vote won by
Australian Labor Party candidates
`LP`
numeric, percent of first preference votes cast for Liberal Party
candidates
`NP`
numeric, percent of first preference votes cast for National Party
(Country Party) candidates
`DLP`
numeric, percent of first preference votes cast for Democratic Labor
Party candidates
`Dem`
numeric, percent of first preference votes cast for Australian
Democrat candidates
`Green`
numeric, percent of first preference votes cast for Green Party
candidates
`Hanson`
numeric, percent of first preference votes cast for candidates from
Pauline Hanson's One Nation party
`Com`
numeric, percent of first preference votes cast for Communist Party
candidates
`AP`
numeric, percent of first preference votes cast for Australia Party
candidates
`Informal`
numeric, percent of ballots cast that are spoiled, blank, or
otherwise uncountable (usually because of errors in enumerating
preferences)
`Turnout`
numeric, percent of enrolled voters recorded as having turned out to
vote (Australia has compulsory voting)
Australian Electoral Commission. http://www.aec.gov.au.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `australian_elections.csv`.
Returns:
Tuple of np.ndarray `x_train` with 24 rows and 19 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'australian_elections.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/pscl/AustralianElections.csv'
maybe_download_and_extract(path, url,
save_file_name='australian_elections.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
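# Usage sketch (not part of the original module); '~/data' is a placeholder cache
# directory, and the first call downloads the CSV from the URL above.
if __name__ == '__main__':
    x_train, metadata = australian_elections('~/data')
    print(x_train.shape)  # (24, 19) per the docstring
    print(list(metadata['columns']))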
|
src/c3nav/api/apps.py | johnjohndoe/c3nav | 132 | 11122440 | <filename>src/c3nav/api/apps.py<gh_stars>100-1000
from django.apps import AppConfig
from django.conf import settings
from django.db.models.signals import post_save
class APIConfig(AppConfig):
name = 'c3nav.api'
def ready(self):
from c3nav.api.signals import remove_tokens_on_user_save
post_save.connect(remove_tokens_on_user_save, sender=settings.AUTH_USER_MODEL)
|
tests/java.py | codelv/enaml-native | 237 | 11122466 | """
Copyright (c) 2017-2018, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Jan 18, 2018
@author
"""
import hashlib
from textwrap import dedent
from enamlnative.android.bridge import (
JavaBridgeObject, JavaMethod, JavaStaticMethod, JavaField, JavaCallback,
JavaProxy
)
def get_member_id(cls, m):
    """ Return a stable integer id for member `m` of the bridge class `cls`.

    Parameters
    ----------
    cls: JavaBridgeObject subclass
    m: Member (JavaMethod, JavaField, ...)

    Returns
    -------
    result: int

    Note: the original source was left unfinished here ("return hashlib.");
    the md5-derived id below is a plausible completion, not the author's code.
    """
    key = '{}.{}'.format(cls.__name__, m.name).encode('utf-8')
    return int(hashlib.md5(key).hexdigest()[:8], 16)
def find_java_classes(cls):
""" Find all java classes. Pulled from
Parameters
----------
cls: Type or Class
Class to find
Returns
-------
result: List
All of subclasses of the given class
References
-----------
- https://stackoverflow.com/questions/3862310/
"""
all_subclasses = []
for subclass in cls.__subclasses__():
all_subclasses.append(subclass)
all_subclasses.extend(find_java_classes(subclass))
return all_subclasses
def generate_source(cls):
""" Generate java source to decode and use the object directly without
reflection.
Parameters
----------
cls: JavaBridgeObject
Class to generate jova source for
Returns
-------
"""
#: Java class name
classname = cls.__nativeclass__.default_value_mode[-1]
source = dedent("""
package com.codelv.enamlnative.gen;
class Bridge{classname} implements BridgeInterface {{
public {classname} createObject(int constructorId, Value[] args) {{
switch (constructorId) {{
{constructors}
}}
}}
public Object invokeStatic(int methodId, Value[] args) {{
switch (methodId) {{
{staticmethods}
}}
}}
public Object invokeMethod(Object objRef, int methodId, Value[] args) {{
switch (methodId) {{
{methods}
}}
}}
public void setField(Object objRef, int fieldId, Value[] args) {{
{classname} obj = ({classname}) objRef;
switch (fieldId) {{
{fields}
}}
}}
}}
""")
#: Find all java fields, methods, etc...
methods = []
fields = []
static_methods = []
for m in cls.members().values():
if isinstance(m, JavaMethod):
if m.__returns__:
methods.append(dedent("""
case {id}:
return obj.{m.name}({method_args});
"""))
else:
methods.append(dedent("""
case {id}:
obj.{m.name}({method_args});
break;
"""))
elif isinstance(m, JavaField):
fields.append(dedent("""
case {id}:
obj.{m.name} = {value};
break;
""").format(m=m, id=get_member_id(cls, m)))
elif isinstance(m, JavaStaticMethod):
if m.__returns__:
static_methods.append(dedent("""
case {id}:
return obj.{m.name}({method_args});
"""))
else:
static_methods.append(dedent("""
case {method_id}:
obj.{method_name}({method_args});
break;
"""))
    #: Return the rendered source. The template expects `constructors` and
    #: `staticmethods` keys; constructor dispatch is not generated yet, so an
    #: empty block is passed for it.
    return source.format(classname=classname,
                         constructors="",
                         methods="\n        ".join(methods),
                         staticmethods="\n        ".join(static_methods),
                         fields="\n        ".join(fields))
def generate():
""" Generate the Java source used to eliminate the need for using
reflection over the bridge.
"""
#: Import all the classes first
from enamlnative.android import api
from enamlnative.android.factories import ANDROID_FACTORIES
for name, factory in ANDROID_FACTORIES.items():
factory()
#: Now gather them all
java_classes = find_java_classes(JavaBridgeObject)
#: Now generate it
for cls in java_classes:
generate_source(cls) |
deepcpg/evaluation.py | cangermueller/deepcpg2 | 151 | 11122471 | """Functions for evaluating prediction performance."""
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import pandas as pd
import sklearn.metrics as skm
from scipy.stats import kendalltau
from six.moves import range
from .data import CPG_NAN, OUTPUT_SEP
from .utils import get_from_module
def cor(y, z):
"""Compute Pearson's correlation coefficient."""
return np.corrcoef(y, z)[0, 1]
def kendall(y, z, nb_sample=100000):
"""Compute Kendall's correlation coefficient."""
if len(y) > nb_sample:
idx = np.arange(len(y))
np.random.shuffle(idx)
idx = idx[:nb_sample]
y = y[idx]
z = z[idx]
return kendalltau(y, z)[0]
def mad(y, z):
"""Compute mean absolute deviation."""
return np.mean(np.abs(y - z))
def mse(y, z):
"""Compute mean squared error."""
return np.mean((y - z)**2)
def rmse(y, z):
"""Compute root mean squared error."""
return np.sqrt(mse(y, z))
def auc(y, z, round=True):
"""Compute area under the ROC curve."""
if round:
y = y.round()
if len(y) == 0 or len(np.unique(y)) < 2:
return np.nan
return skm.roc_auc_score(y, z)
def acc(y, z, round=True):
"""Compute accuracy."""
if round:
y = np.round(y)
z = np.round(z)
return skm.accuracy_score(y, z)
def tpr(y, z, round=True):
"""Compute true positive rate."""
if round:
y = np.round(y)
z = np.round(z)
return skm.recall_score(y, z)
def tnr(y, z, round=True):
"""Compute true negative rate."""
if round:
y = np.round(y)
z = np.round(z)
c = skm.confusion_matrix(y, z)
return c[0, 0] / c[0].sum()
def mcc(y, z, round=True):
"""Compute Matthew's correlation coefficient."""
if round:
y = np.round(y)
z = np.round(z)
return skm.matthews_corrcoef(y, z)
def f1(y, z, round=True):
"""Compute F1 score."""
if round:
y = np.round(y)
z = np.round(z)
return skm.f1_score(y, z)
def cat_acc(y, z):
"""Compute categorical accuracy given one-hot matrices."""
return np.mean(y.argmax(axis=1) == z.argmax(axis=1))
# Classification metrics.
CLA_METRICS = [auc, acc, tpr, tnr, f1, mcc]
# Regression metrics.
REG_METRICS = [mse, mad, cor]
# Categorical metrics.
CAT_METRICS = [cat_acc]
def evaluate(y, z, mask=CPG_NAN, metrics=CLA_METRICS):
"""Compute multiple performance metrics.
Computes evaluation metrics using functions in `metrics`.
Parameters
----------
y: :class:`numpy.ndarray`
:class:`numpy.ndarray` vector with labels.
z: :class:`numpy.ndarray`
:class:`numpy.ndarray` vector with predictions.
mask: scalar
Value to mask unobserved labels in `y`.
metrics: list
List of evaluation functions to be used.
Returns
-------
Ordered dict
Ordered dict with name of evaluation functions as keys and evaluation
metrics as values.
"""
z = z.ravel()
if mask is not None:
t = y != mask
y = y[t]
z = z[t]
p = OrderedDict()
for metric in metrics:
if len(y):
p[metric.__name__] = metric(y, z)
else:
p[metric.__name__] = np.nan
p['n'] = len(y)
return p
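# Illustrative usage sketch: labels may contain CPG_NAN for unobserved sites,
# which `evaluate` masks out before computing metrics. The arrays below are
# made up purely for demonstration.
def _evaluate_example():
    y = np.array([1, 0, CPG_NAN, 1, 0], dtype=float)
    z = np.array([0.9, 0.2, 0.5, 0.7, 0.1])
    # Returns an OrderedDict such as {'auc': ..., 'acc': ..., 'n': 4}.
    return evaluate(y, z, mask=CPG_NAN, metrics=[auc, acc])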
def evaluate_cat(y, z, metrics=CAT_METRICS,
binary_metrics=None):
"""Compute multiple performance metrics for categorical outputs.
Computes evaluation metrics for categorical (one-hot encoded labels) using
functions in `metrics`.
Parameters
----------
y: :class:`numpy.ndarray`
:class:`numpy.ndarray` matrix with one-hot encoded labels.
z: :class:`numpy.ndarray`
:class:`numpy.ndarray` matrix with class probabilities in rows.
metrics: list
List of evaluation functions to be used.
    binary_metrics: list
        List of binary evaluation metrics to be computed separately for each
        category (class). Results are stored as `name_i` in the output
        dictionary, where `name` is the name of the evaluation metric and `i`
        is the index of the category.
Returns
-------
Ordered dict
Ordered dict with name of evaluation functions as keys and evaluation
metrics as values.
"""
idx = y.sum(axis=1) > 0
y = y[idx]
z = z[idx]
p = OrderedDict()
for metric in metrics:
p[metric.__name__] = metric(y, z)
if binary_metrics:
for i in range(y.shape[1]):
for metric in binary_metrics:
p['%s_%d' % (metric.__name__, i)] = metric(y[:, i], z[:, i])
p['n'] = len(y)
return p
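# Illustrative usage sketch for categorical outputs: labels are one-hot encoded
# rows, predictions are class probabilities, and per-class AUCs are reported as
# 'auc_0', 'auc_1', ... The random data is for demonstration only.
def _evaluate_cat_example(nb_sample=100, nb_class=3):
    y = np.eye(nb_class)[np.random.randint(0, nb_class, nb_sample)]
    z = np.random.uniform(size=(nb_sample, nb_class))
    z /= z.sum(axis=1, keepdims=True)
    return evaluate_cat(y, z, binary_metrics=[auc])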
def get_output_metrics(output_name):
"""Return list of evaluation metrics for model output name."""
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[0] == 'cpg':
metrics = CLA_METRICS
elif _output_name[0] == 'bulk':
metrics = REG_METRICS + CLA_METRICS
elif _output_name[-1] in ['diff', 'mode', 'cat2_var']:
metrics = CLA_METRICS
elif _output_name[-1] == 'mean':
metrics = REG_METRICS + CLA_METRICS + [kendall]
elif _output_name[-1] == 'var':
metrics = REG_METRICS + [kendall]
else:
raise ValueError('Invalid output name "%s"!' % output_name)
return metrics
def evaluate_outputs(outputs, preds):
"""Evaluate performance metrics of multiple outputs.
Given the labels and predictions of multiple outputs, chooses and computes
performance metrics of each output depending on its name.
Parameters
----------
outputs: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with labels as value.
preds: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with predictions as value.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with columns `metric`, `output`, `value`.
"""
perf = []
for output_name in outputs:
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[-1] in ['cat_var']:
tmp = evaluate_cat(outputs[output_name],
preds[output_name],
binary_metrics=[auc])
else:
metrics = get_output_metrics(output_name)
tmp = evaluate(outputs[output_name],
preds[output_name],
metrics=metrics)
tmp = pd.DataFrame({'output': output_name,
'metric': list(tmp.keys()),
'value': list(tmp.values())})
perf.append(tmp)
perf = pd.concat(perf)
perf = perf[['metric', 'output', 'value']]
perf.sort_values(['metric', 'value'], inplace=True)
return perf
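# Illustrative usage sketch: output names are built with OUTPUT_SEP so that
# `get_output_metrics` selects classification metrics for a 'cpg' output. The
# labels and predictions are random and for demonstration only.
def _evaluate_outputs_example(nb_sample=100):
    name = OUTPUT_SEP.join(['cpg', 'cell1'])
    outputs = {name: np.random.binomial(1, 0.5, nb_sample).astype(float)}
    preds = {name: np.random.uniform(size=nb_sample)}
    # Returns a DataFrame with columns ['metric', 'output', 'value'].
    return evaluate_outputs(outputs, preds)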
def is_binary_output(output_name):
"""Return `True` if `output_name` is binary."""
_output_name = output_name.split(OUTPUT_SEP)
if _output_name[0] == 'cpg':
return True
elif _output_name[-1] in ['diff', 'mode', 'cat2_var']:
return True
else:
return False
def evaluate_curve(outputs, preds, fun=skm.roc_curve, mask=CPG_NAN,
nb_point=None):
"""Evaluate performance curves of multiple outputs.
    Given the labels and predictions of multiple outputs, computes a performance
    curve, e.g. an ROC or PR curve, for each output.
Parameters
----------
outputs: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with labels as value.
preds: dict
`dict` with the name of outputs as keys and a :class:`numpy.ndarray`
vector with predictions as value.
fun: function
Function to compute the performance curves.
mask: scalar
Value to mask unobserved labels in `y`.
nb_point: int
        Maximum number of points per curve, to reduce memory usage.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with columns `output`, `x`, `y`, `thr`.
"""
curves = []
for output_name in outputs.keys():
if not is_binary_output(output_name):
continue
output = outputs[output_name].round().squeeze()
pred = preds[output_name].squeeze()
        idx = output != mask
output = output[idx]
pred = pred[idx]
x, y, thr = fun(output, pred)
length = min(len(x), len(y), len(thr))
if nb_point and length > nb_point:
idx = np.linspace(0, length - 1, nb_point).astype(np.int32)
else:
idx = slice(0, length)
x = x[idx]
y = y[idx]
thr = thr[idx]
curve = OrderedDict()
curve['output'] = output_name
curve['x'] = x
curve['y'] = y
curve['thr'] = thr
curve = pd.DataFrame(curve)
curves.append(curve)
if not curves:
return None
else:
curves = pd.concat(curves)
return curves
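# Illustrative usage sketch: the same helper computes precision-recall curves
# when a different sklearn curve function is passed via `fun`. Random data,
# for demonstration only.
def _evaluate_curve_example(nb_sample=1000):
    name = OUTPUT_SEP.join(['cpg', 'cell1'])
    outputs = {name: np.random.binomial(1, 0.5, nb_sample).astype(float)}
    preds = {name: np.random.uniform(size=nb_sample)}
    roc = evaluate_curve(outputs, preds, fun=skm.roc_curve, nb_point=100)
    pr = evaluate_curve(outputs, preds, fun=skm.precision_recall_curve,
                        nb_point=100)
    return roc, pr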
def unstack_report(report):
"""Unstack performance report.
Reshapes a :class:`pandas.DataFrame` of :func:`evaluate_outputs` such that
performance metrics are listed as columns.
Parameters
----------
report: :class:`pandas.DataFrame`
:class:`pandas.DataFrame` from :func:`evaluate_outputs`.
Returns
-------
:class:`pandas.DataFrame`
:class:`pandas.DataFrame` with performance metrics as columns.
"""
index = list(report.columns[~report.columns.isin(['metric', 'value'])])
report = pd.pivot_table(report, index=index, columns='metric',
values='value')
report.reset_index(index, inplace=True)
report.columns.name = None
# Sort columns
columns = list(report.columns)
sorted_columns = []
for fun in CAT_METRICS + CLA_METRICS + REG_METRICS:
        for column in columns:
            if column.startswith(fun.__name__):
sorted_columns.append(column)
sorted_columns = index + sorted_columns
sorted_columns += [col for col in columns if col not in sorted_columns]
report = report[sorted_columns]
order = []
if 'auc' in report.columns:
order.append(('auc', False))
elif 'mse' in report.columns:
order.append(('mse', True))
elif 'acc' in report.columns:
order.append(('acc', False))
report.sort_values([x[0] for x in order],
ascending=[x[1] for x in order],
inplace=True)
return report
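# Illustrative usage sketch of the typical reporting chain: compute per-output
# metrics with `evaluate_outputs`, then unstack so each metric is a column.
# Random data, for demonstration only.
def _report_example(nb_sample=200):
    name = OUTPUT_SEP.join(['cpg', 'cell1'])
    outputs = {name: np.random.binomial(1, 0.5, nb_sample).astype(float)}
    preds = {name: np.random.uniform(size=nb_sample)}
    report = evaluate_outputs(outputs, preds)
    # One row per output, with columns such as 'auc', 'acc', 'tpr', ...
    return unstack_report(report)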
def get(name):
"""Return object from module by its name."""
return get_from_module(name, globals())
|
sendrecv/gst-sharp/nuget.py | heftig/gstwebrtc-demos | 451 | 11122480 | #!/usr/bin/python3
import argparse
import getpass
import os
import sys
import shutil
import subprocess
from datetime import datetime
from urllib.request import urlretrieve
from zipfile import ZipFile
NUSPEC_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<package xmlns="http://schemas.microsoft.com/packaging/2011/08/nuspec.xsd">
<metadata>
<id>{package_name}</id>
<authors>{author}</authors>
<owners>{owner}</owners>
<licenseUrl>{license_url}</licenseUrl>
<projectUrl>{project_url}</projectUrl>
<iconUrl>{icon_url}</iconUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>{description}.</description>
<copyright>{copyright}</copyright>
<tags>{tags}</tags>
<version>{version}</version>
<dependencies>
{dependencies} </dependencies>
</metadata>
<files>
{files} </files>
</package>
"""
TARGETS_TEMPLATE = r"""<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Target Name="{package_name}CopyMapConfigs" AfterTargets="AfterBuild">
<CreateItem Include="$(MSBuildThisFileDirectory)\{frameworkdir}\*.config">
<Output TaskParameter="Include" ItemName="MapConfigs" />
</CreateItem>
<Copy SourceFiles="@(MapConfigs)" DestinationFiles="@(MapConfigs->'$(OutDir)\%(RecursiveDir)%(Filename)%(Extension)')" />
</Target>
</Project>"""
class Nugetifier:
def cleanup_args(self):
self.nugetdir = os.path.join(self.builddir,
self.package_name + 'nupkg')
self.frameworkdir = 'net45'
self.nuget_build_dir = os.path.join(
self.nugetdir, 'build', self.frameworkdir)
self.nuget_lib_dir = os.path.join(
self.nugetdir, 'lib', self.frameworkdir)
self.nuspecfile = os.path.join(
self.nugetdir, '%s.nuspec' % self.package_name)
self.nugettargets = os.path.join(
self.nuget_build_dir, "%s.targets" % self.package_name)
self.nuget = shutil.which('nuget')
if not self.nuget:
print("Could not find the `nuget` tool, install it and retry!")
return -1
for d in [self.nugetdir, self.nuget_lib_dir, self.nuget_build_dir]:
os.makedirs(d, exist_ok=True)
if not self.description:
self.description = "%s c# bindings" % self.package_name
if not self.copyright:
self.copyright = "Copyright %s" % datetime.now().year
if not self.tags:
self.tags = self.package_name
return 0
def run(self):
res = self.cleanup_args()
if res:
return res
self.files = ''
def add_file(path, target="lib"):
f = ' <file src="%s" target="%s"/>\n' % (
path, os.path.join(target, os.path.basename(path)))
self.files += f
self.dependencies = ''
for dependency in self.dependency:
_id, version = dependency.split(":")
self.dependencies += ' <dependency id="%s" version="%s" />\n' % (
_id, version)
for assembly in self.assembly:
add_file(assembly, os.path.join('lib', self.frameworkdir))
for f in [assembly + '.config', assembly[:-3] + 'pdb']:
if os.path.exists(f):
add_file(f, os.path.join('build', self.frameworkdir))
with open(self.nugettargets, 'w') as _:
print(TARGETS_TEMPLATE.format(**self.__dict__), file=_)
add_file(self.nugettargets, 'build')
with open(self.nuspecfile, 'w') as _:
print(NUSPEC_TEMPLATE.format(**self.__dict__), file=_)
subprocess.check_call([self.nuget, 'pack', self.nuspecfile],
cwd=self.builddir)
class NugetDownloader:
def reporthook(self, blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
s = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize: # near the end
sys.stderr.write("\n")
else: # total size is unknown
sys.stderr.write("read %d\n" % (readsofar,))
def run(self):
url = "https://www.nuget.org/api/v2/package/{nuget_name}/{nuget_version}".format(
**self.__dict__)
workdir = os.path.join(self.current_builddir,
self.nuget_name, self.nuget_version)
os.makedirs(workdir, exist_ok=True)
try:
with open(os.path.join(workdir, 'linkline'), 'r') as f:
print(f.read())
return
except FileNotFoundError:
pass
nugetpath = os.path.join(workdir, self.nuget_name) + '.zip'
print("Downloading %s into %s" % (url, nugetpath), file=sys.stderr)
urlretrieve(url, nugetpath, self.reporthook)
lib_paths = [os.path.join('lib', self.csharp_version), 'lib']
build_path = os.path.join('build', self.csharp_version)
dll_path = os.path.join(self.nuget_name, self.nuget_version)
extract_dir = os.path.join(self.current_builddir, dll_path)
os.makedirs(extract_dir, exist_ok=True)
linkline = ''
print("%s - %s" % (self.builddir, extract_dir), file=sys.stderr)
configs = []
dlldir = None
        with ZipFile(nugetpath) as zf:
            for lib_path in lib_paths:
                for f in zf.infolist():
                    if f.filename.startswith(lib_path) or f.filename.startswith(build_path):
                        zf.extract(f, path=extract_dir)
if f.filename.endswith('.dll'):
fpath = os.path.relpath(os.path.join(extract_dir, f.filename), self.builddir)
linkline += ' -r:' + fpath
dlldir = os.path.dirname(os.path.join(extract_dir, f.filename))
elif f.filename.endswith('.dll.config'):
configs.append(os.path.join(extract_dir, f.filename))
if dlldir:
break
print(dlldir, file=sys.stderr)
for config in configs:
print(config, file=sys.stderr)
print(os.path.join(dlldir, os.path.basename(config)), file=sys.stderr)
os.rename(config, os.path.join(dlldir, os.path.basename(config)))
with open(os.path.join(workdir, 'linkline'), 'w') as f:
print(linkline.strip(), file=f)
print(linkline.strip())
if __name__ == "__main__":
if "get" not in sys.argv:
parser = argparse.ArgumentParser()
parser.add_argument('--builddir')
parser.add_argument('--package-name')
parser.add_argument('--author', default=getpass.getuser())
parser.add_argument('--owner', default=getpass.getuser())
parser.add_argument('--native', action='append', default=[])
parser.add_argument('--assembly', action='append', default=[])
parser.add_argument('--out')
parser.add_argument('--description')
parser.add_argument('--copyright')
parser.add_argument('--version')
parser.add_argument('--icon-url', default='')
parser.add_argument('--project-url', default='')
parser.add_argument('--license-url', default='')
parser.add_argument('--tags', default='')
parser.add_argument('--dependency', default=[], action='append')
runner = Nugetifier()
else:
sys.argv.remove('get')
parser = argparse.ArgumentParser()
parser.add_argument('--builddir')
parser.add_argument('--current-builddir')
parser.add_argument('--nuget-name')
parser.add_argument('--nuget-version')
parser.add_argument('--csharp-version')
runner = NugetDownloader()
options = parser.parse_args(namespace=runner)
exit(runner.run())
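# Illustrative invocations (paths, package names and versions are made up):
#
# Build a nuget from previously compiled assemblies:
#   python3 nuget.py --builddir build --package-name GstSharp \
#       --assembly build/GstSharp.dll --version 1.14.0 \
#       --dependency GLibSharp:3.22.24.37
#
# Download a nuget and print the link line for the extracted assemblies:
#   python3 nuget.py get --builddir build --current-builddir build \
#       --nuget-name GstSharp --nuget-version 1.14.0.1 --csharp-version net45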
|