max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
utilities/Hive_metastore_migration/src/export_from_datacatalog.py | xy1m/aws-glue-samples | 925 | 12735043 | # Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from __future__ import print_function
from awsglue.context import GlueContext
from hive_metastore_migration import *
CONNECTION_TYPE_NAME = 'com.amazonaws.services.glue.connections.DataCatalogConnection'
def transform_catalog_to_df(dyf):
return dyf.toDF()
def datacatalog_migrate_to_s3(databases, tables, partitions, output_path):
# load
databases.write.format('json').mode('overwrite').save(output_path + 'databases')
tables.write.format('json').mode('overwrite').save(output_path + 'tables')
partitions.write.format('json').mode('overwrite').save(output_path + 'partitions')
# apply hard-coded schema on dataframes, ensure schema is consistent for transformations
def change_schemas(sql_context, databases, tables, partitions):
databases = sql_context.read.json(databases.toJSON(), schema=DATACATALOG_DATABASE_SCHEMA)
tables = sql_context.read.json(tables.toJSON(), schema=DATACATALOG_TABLE_SCHEMA)
partitions = sql_context.read.json(partitions.toJSON(), schema=DATACATALOG_PARTITION_SCHEMA)
return (databases, tables, partitions)
def datacatalog_migrate_to_hive_metastore(sc, sql_context, databases, tables, partitions, connection):
hive_metastore = HiveMetastore(connection, sql_context)
transform_databases_tables_partitions(sc, sql_context, hive_metastore, databases, tables, partitions)
hive_metastore.export_to_metastore()
def read_databases_from_catalog(sql_context, glue_context, datacatalog_name, database_arr, region):
databases = None
tables = None
partitions = None
for database in database_arr:
dyf = glue_context.create_dynamic_frame.from_options(
connection_type=CONNECTION_TYPE_NAME,
connection_options={'catalog.name': datacatalog_name,
'catalog.database': database,
'catalog.region': region})
df = transform_catalog_to_df(dyf)
# filter into databases, tables, and partitions
dc_databases_no_schema = df.where('type = "database"')
dc_tables_no_schema = df.where('type = "table"')
dc_partitions_no_schema = df.where('type = "partition"')
# apply schema to dataframes
(dc_databases, dc_tables, dc_partitions) = \
change_schemas(sql_context, dc_databases_no_schema, dc_tables_no_schema, dc_partitions_no_schema)
(a_databases, a_tables, a_partitions) = \
transform_items_to_item(dc_databases=dc_databases, dc_tables=dc_tables, dc_partitions=dc_partitions)
databases = databases.union(a_databases) if databases else a_databases
tables = tables.union(a_tables) if tables else a_tables
partitions = partitions.union(a_partitions) if partitions else a_partitions
return (databases, tables, partitions)
def main():
to_s3 = 'to-s3'
to_jdbc = 'to-jdbc'
parser = argparse.ArgumentParser(prog=sys.argv[0])
parser.add_argument('-m', '--mode', required=True, choices=[to_s3, to_jdbc], help='Choose to migrate from datacatalog to s3 or to metastore')
parser.add_argument('--database-names', required=True, help='Semicolon-separated list of names of database in Datacatalog to export')
parser.add_argument('-o', '--output-path', required=False, help='Output path, either local directory or S3 path')
parser.add_argument('-c', '--connection-name', required=False, help='Glue Connection name for Hive metastore JDBC connection')
parser.add_argument('-R', '--region', required=False, help='AWS region of source Glue DataCatalog, default to "us-east-1"')
options = get_options(parser, sys.argv)
if options['mode'] == to_s3:
validate_options_in_mode(
options=options, mode=to_s3,
required_options=['output_path'],
not_allowed_options=['connection_name']
)
elif options['mode'] == to_jdbc:
validate_options_in_mode(
options=options, mode=to_jdbc,
required_options=['connection_name'],
not_allowed_options=['output_path']
)
else:
raise AssertionError('unknown mode ' + options['mode'])
validate_aws_regions(options['region'])
# spark env
(conf, sc, sql_context) = get_spark_env()
glue_context = GlueContext(sc)
# extract from datacatalog reader
database_arr = options['database_names'].split(';')
(databases, tables, partitions) = read_databases_from_catalog(
sql_context=sql_context,
glue_context=glue_context,
datacatalog_name='datacatalog',
database_arr=database_arr,
region=options.get('region') or 'us-east-1'
)
if options['mode'] == to_s3:
output_path = get_output_dir(options['output_path'])
datacatalog_migrate_to_s3(
databases=databases,
tables=tables,
partitions=partitions,
output_path=output_path
)
elif options['mode'] == to_jdbc:
connection_name = options['connection_name']
datacatalog_migrate_to_hive_metastore(
sc=sc,
sql_context=sql_context,
databases=databases,
tables=tables,
partitions=partitions,
connection=glue_context.extract_jdbc_conf(connection_name)
)
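# Illustrative command lines for this script (the runner, bucket and connection names below
# are placeholders, not taken from the sample itself):
#   spark-submit export_from_datacatalog.py -m to-s3 --database-names "db1;db2" \
#       --output-path s3://my-bucket/catalog-export/
#   spark-submit export_from_datacatalog.py -m to-jdbc --database-names db1 \
#       --connection-name my-hive-metastore-connection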
if __name__ == '__main__':
main()
|
mindinsight/explainer/encapsulator/evaluation_encap.py | fapbatista/mindinsight | 216 | 12735050 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Explainer evaluation encapsulator."""
import copy
from mindinsight.explainer.encapsulator.explain_data_encap import ExplainDataEncap
from mindinsight.datavisual.common.exceptions import TrainJobNotExistError
class EvaluationEncap(ExplainDataEncap):
"""Explainer evaluation encapsulator."""
def query_explainer_scores(self, train_id):
"""Query evaluation scores."""
job = self.job_manager.get_job(train_id)
if job is None:
raise TrainJobNotExistError(train_id)
return copy.deepcopy(job.explainer_scores)
|
tools/deep_memory_profiler/subcommands/upload.py | kjthegod/chromium | 231 | 12735094 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import tempfile
import zipfile
from lib.subcommand import SubCommand
from lib.symbol import SymbolDataSources
LOGGER = logging.getLogger('dmprof')
class UploadCommand(SubCommand):
def __init__(self):
super(UploadCommand, self).__init__(
'Usage: %prog upload [--gsutil path/to/gsutil] '
'<first-dump> <destination-gs-path>')
self._parser.add_option('--gsutil', default='gsutil',
help='path to GSUTIL', metavar='GSUTIL')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
gs_path = args[2]
dump_files = SubCommand._find_all_dumps(dump_path)
bucket_files = SubCommand._find_all_buckets(dump_path)
prefix = SubCommand._find_prefix(dump_path)
symbol_data_sources = SymbolDataSources(prefix)
symbol_data_sources.prepare()
symbol_path = symbol_data_sources.path()
handle_zip, filename_zip = tempfile.mkstemp('.zip', 'dmprof')
os.close(handle_zip)
try:
file_zip = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in dump_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
for filename in bucket_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
symbol_basename = os.path.basename(os.path.abspath(symbol_path))
for filename in os.listdir(symbol_path):
if not filename.startswith('.'):
file_zip.write(os.path.join(symbol_path, filename),
os.path.join(symbol_basename, os.path.basename(
os.path.abspath(filename))))
file_zip.close()
returncode = UploadCommand._run_gsutil(
options.gsutil, 'cp', '-a', 'public-read', filename_zip, gs_path)
finally:
os.remove(filename_zip)
return returncode
@staticmethod
def _run_gsutil(gsutil, *args):
"""Run gsutil as a subprocess.
Args:
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The return code from the process.
"""
command = [gsutil] + list(args)
LOGGER.info("Running: %s", command)
try:
return subprocess.call(command)
except OSError, e:
LOGGER.error('Error to run gsutil: %s', e)
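# Illustrative call (placeholder paths; mirrors the invocation in do() above):
#   UploadCommand._run_gsutil('gsutil', 'cp', '-a', 'public-read',
#                             '/tmp/dmprof_abc.zip', 'gs://my-bucket/dumps/')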
|
LeetCode/python3/122.py | ZintrulCre/LeetCode_Archiver | 279 | 12735097 |
class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
size = len(prices)
bought = False
profit = 0
price = 0
for i in range(0, size - 1):
if not bought:
if prices[i] < prices[i + 1]:
bought = True
price = prices[i]
else:
if prices[i] > prices[i + 1]:
bought = False
profit += prices[i] - price
price = 0
if bought:
profit += prices[i + 1] - price
        return profit
|
vnpy/gateway/sopttest/__init__.py | funrunskypalace/vnpy | 19,529 | 12735112 |
from .sopttest_gateway import SopttestGateway
|
SSR-Net/data/TYY_XSDATA_create_db.py | bleakie/MaskInsightface | 269 | 12735141 |
import numpy as np
import cv2
import os
import argparse
import csv
def get_args():
parser = argparse.ArgumentParser(description="This script cleans-up noisy labels "
"and creates database for training.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", type=str, default="/home/sai/YANG/datasets/face_datasets/megaage_asian/megaage_asian/train_crop/",
help="dataset; wiki or imdb")
parser.add_argument("--output", type=str, default='/home/sai/YANG/datasets/face_datasets/megaage_asian/megaage_asian/megaage_asian.npz',
help="path to output database mat file")
parser.add_argument('--label', default='/home/sai/YANG/datasets/face_datasets/megaage_asian/megaage_asian/train.csv', help='')
parser.add_argument("--img_size", type=int, default=112,
help="output image size")
args = parser.parse_args()
return args
def main():
args = get_args()
out_genders = []
out_ages = []
out_imgs = []
labelList = csv.reader(open(args.label, "rt", encoding="utf-8-sig"))
for row in labelList:
true_age = int(row[1])
true_gender = int(0)
img_id = row[0]
img = cv2.imread(os.path.join(args.input, img_id))
if img is None:
continue
out_genders.append(true_gender)
out_ages.append(true_age)
out_imgs.append(cv2.resize(img, (args.img_size, args.img_size)))
np.savez(args.output, image=np.array(out_imgs), gender=np.array(out_genders), age=np.array(out_ages),
img_size=args.img_size)
if __name__ == '__main__':
main()
|
leo/unittests/test_doctests.py | thomasbuttler/leo-editor | 1,550 | 12735170 | # -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20210926044012.1: * @file ../unittests/test_doctests.py
#@@first
"""Run all doctests."""
import doctest
import glob
import os
import unittest
from leo.core import leoGlobals as g
unittest_dir = os.path.dirname(__file__)
leo_dir = os.path.abspath(os.path.join(unittest_dir, '..'))
#@+others # Define a function containing a doctest.
#@+node:ekr.20210926053601.1: ** factorial (test_dectests.py)
def factorial(n):
# Modified from https://docs.python.org/3/library/doctest.html
# Must import factorial. See: stackoverflow.com/questions/65066002
"""Return the factorial of n, an exact integer >= 0.
>>> from leo.unittests.test_doctests import factorial
>>> [factorial(n) for n in range(6)]
[1, 1, 2, 6, 24, 120]
>>> factorial(30)
265252859812191058636308480000000
>>> factorial(-1)
Traceback (most recent call last):
...
ValueError: n must be >= 0
Factorials of floats are OK, but the float must be an exact integer:
>>> factorial(30.1)
Traceback (most recent call last):
...
ValueError: n must be exact integer
>>> factorial(30.0)
265252859812191058636308480000000
It must also not be ridiculously large:
>>> factorial(1e100)
Traceback (most recent call last):
...
OverflowError: n too large
""" # Blank line above is required.
import math
if not n >= 0:
raise ValueError("n must be >= 0")
if math.floor(n) != n:
raise ValueError("n must be exact integer")
if n+1 == n: # catch a value like 1e300
raise OverflowError("n too large")
result = 1
factor = 2
while factor <= n:
result *= factor
factor += 1
return result
#@-others
class TestDocTests(unittest.TestCase): # No need to be a subclass of leoTest2.LeoUnitTest.
def test_all_doctests(self):
fails_list = [] # List of files with failing doctests.
files_list = [] # List of files containing a doctest.
n = 0 # Total doctests found
for module in ('core', 'plugins', 'unittests'):
module_path = os.path.join(leo_dir, module)
self.assertTrue(os.path.exists(module_path), msg=repr(module_path))
path = os.path.join(module_path, '**', '*.py')
files = glob.glob(path, recursive=True)
files = [z for z in files if not z.endswith('__init__.py')]
for f in files:
# Exclude two problematic files.
if 'dtest.py' in f or 'javascript.py' in f:
continue
fails, count = doctest.testfile(f)
n += count
if count:
files_list.append(f)
if fails:
fails_list.append(f)
print(f"{fails} failures in {g.shortFileName(f)}")
self.assertEqual(fails_list, [])
if 0:
g.trace(f"{n} doctests found in {len(files_list)} file{g.plural(len(files_list))}")
g.printObj(files_list, tag="files containing any doctest")
g.printObj(fails_list, tag="files containing a failed doctest")
#@-leo
|
tests/bytecode/mp-tests/if2.py | LabAixBidouille/micropython | 303 | 12735204 |
def f(x):
if x:
return
if x:
return
elif y:
return
if x:
return
else:
return
if x:
return
elif y:
return
else:
return
if x:
return
elif y:
return
elif z:
return
else:
return
return None
|
338 Counting Bits.py | ChiFire/legend_LeetCode | 872 | 12735227 |
"""
Given a non-negative integer num. For every number i in the range 0 <= i <= num, calculate the number of 1's in
their binary representation and return them as an array.
Example:
For num = 5 you should return [0,1,1,2,1,2].
Follow up:
It is very easy to come up with a solution with run time O(n*sizeof(integer)). But can you do it in linear time O(n) /
possibly in a single pass?
Space complexity should be O(n).
Can you do it like a boss? Do it without using any builtin function like __builtin_popcount in c++ or in any other
language.
"""
__author__ = 'Daniel'
class Solution(object):
def countBits(self, num):
"""
Dynamic programming: make use of what you have produced already
0 => 0
1 => 1
10 => 1+0
11 => 1+1
100 => 1+0
101 => 1+1
110 => 1+1
111 => 1+2
:type num: int
:rtype: List[int]
"""
ret = [0]
i = 0
hi = len(ret)
while len(ret) < num + 1:
if i == hi:
i = 0
hi = len(ret)
ret.append(1+ret[i])
i += 1
return ret
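# Illustrative check (not part of the original submission):
#   Solution().countBits(5) -> [0, 1, 1, 2, 1, 2], matching the docstring example above.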
|
tests/unit/small_text/utils/test_data.py | chschroeder/small-text | 218 | 12735301 | import unittest
import numpy as np
from small_text.utils.data import list_length
class DataUtilsTest(unittest.TestCase):
def test_list_length(self):
self.assertEqual(10, list_length(list(range(10))))
self.assertEqual(10, list_length(np.random.rand(10, 2)))
|
avionics/network/network_yaml_test.py | leozz37/makani | 1,178 | 12735307 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the network_util module that require the full network.yaml."""
import os
import unittest
import makani
from makani.avionics.network import network_config
from makani.avionics.network import network_util
class NetworkYamlTest(unittest.TestCase):
def setUp(self):
filename = os.path.join(makani.HOME, 'avionics/network/network.yaml')
self._network_config = network_config.NetworkConfig(filename)
def testCheckForLoopRoutes(self):
config = self._network_config
message_types = config.all_messages
path_finder = network_util.PathFinder(config.GetSwitches(), message_types)
for message in message_types:
graph = network_util.MessageGraph(path_finder, message)
visitor = network_util.MessageGraphVisitor()
graph.VisitSenders(visitor, message.all_senders)
def testCheckForUnintendedRecipients(self):
config = self._network_config
message_types = config.all_messages
path_finder = network_util.PathFinder(config.GetSwitches(), message_types)
for message in message_types:
graph = network_util.MessageGraph(path_finder, message)
network_util.CheckForUnintendedRecipients(graph)
if __name__ == '__main__':
unittest.main()
|
tests/python_slices/list.py | hixio-mh/plugin-python | 362 | 12735326 |
a[-1]
a[-2:]
a[:-2]
a[::-1]
a[1::-1]
a[:-3:-1]
a[-3::-1]
point_coords = coords[i, :]
main(sys.argv[1:])
|
nbdt/data/pascal_context.py | XAVILLA/nbdt | 536 | 12735348 |
###########################################################################
# Created by: <NAME>
# Email: <EMAIL>
# Copyright (c) 2017
###########################################################################
from PIL import Image, ImageOps, ImageFilter
import os
import math
import random
import numpy as np
from tqdm import trange
import torch
import torch.utils.data as data
__all__ = names = ("PascalContext",)
class BaseDataset(data.Dataset):
def __init__(
self,
root,
split,
mode=None,
transform=None,
target_transform=None,
base_size=520,
crop_size=480,
):
self.root = root
self.transform = transform
self.target_transform = target_transform
self.split = split
self.mode = mode if mode is not None else split
self.base_size = base_size
self.crop_size = crop_size
if self.mode == "train":
print(
"BaseDataset: base_size {}, crop_size {}".format(base_size, crop_size)
)
def __getitem__(self, index):
        raise NotImplementedError
@property
def num_class(self):
return self.NUM_CLASS
@property
def pred_offset(self):
        raise NotImplementedError
def make_pred(self, x):
return x + self.pred_offset
def _val_sync_transform(self, img, mask):
outsize = self.crop_size
short_size = outsize
w, h = img.size
if w > h:
oh = short_size
ow = int(1.0 * w * oh / h)
else:
ow = short_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = img.size
x1 = int(round((w - outsize) / 2.0))
y1 = int(round((h - outsize) / 2.0))
img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
# final transform
return img, self._mask_transform(mask)
def _sync_transform(self, img, mask):
# random mirror
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
w, h = img.size
long_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
if h > w:
oh = long_size
ow = int(1.0 * w * long_size / h + 0.5)
short_size = ow
else:
ow = long_size
oh = int(1.0 * h * long_size / w + 0.5)
short_size = oh
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# final transform
return img, self._mask_transform(mask)
def _mask_transform(self, mask):
return torch.from_numpy(np.array(mask)).long()
class PascalContext(BaseDataset):
NUM_CLASS = 59
def __init__(
self,
root="./data",
split="train",
mode=None,
transform=None,
target_transform=None,
**kwargs
):
super(PascalContext, self).__init__(
root, split, mode, transform, target_transform, **kwargs
)
from detail import Detail
# from detail import mask
root = os.path.join(root, "PascalContext")
annFile = os.path.join(root, "trainval_merged.json")
imgDir = os.path.join(root, "JPEGImages")
# training mode
self.detail = Detail(annFile, imgDir, split)
self.transform = transform
self.target_transform = target_transform
self.ids = self.detail.getImgs()
# generate masks
self._mapping = np.sort(
np.array(
[
0,
2,
259,
260,
415,
324,
9,
258,
144,
18,
19,
22,
23,
397,
25,
284,
158,
159,
416,
33,
162,
420,
454,
295,
296,
427,
44,
45,
46,
308,
59,
440,
445,
31,
232,
65,
354,
424,
68,
326,
72,
458,
34,
207,
80,
355,
85,
347,
220,
349,
360,
98,
187,
104,
105,
366,
189,
368,
113,
115,
]
)
)
self.classes = [
"background",
"aeroplane",
"mountain",
"mouse",
"track",
"road",
"bag",
"motorbike",
"fence",
"bed",
"bedclothes",
"bench",
"bicycle",
"diningtable",
"bird",
"person",
"floor",
"boat",
"train",
"book",
"bottle",
"tree",
"window",
"plate",
"platform",
"tvmonitor",
"building",
"bus",
"cabinet",
"shelves",
"light",
"pottedplant",
"wall",
"car",
"ground",
"cat",
"sidewalk",
"truck",
"ceiling",
"rock",
"chair",
"wood",
"food",
"horse",
"cloth",
"sign",
"computer",
"sheep",
"keyboard",
"flower",
"sky",
"cow",
"grass",
"cup",
"curtain",
"snow",
"water",
"sofa",
"dog",
"door",
]
self._key = np.array(range(len(self._mapping))).astype("uint8")
mask_file = os.path.join(root, self.split + ".pth")
print("mask_file:", mask_file)
if os.path.exists(mask_file):
self.masks = torch.load(mask_file)
else:
self.masks = self._preprocess(mask_file)
def _class_to_index(self, mask):
# assert the values
values = np.unique(mask)
for i in range(len(values)):
assert values[i] in self._mapping
index = np.digitize(mask.ravel(), self._mapping, right=True)
return self._key[index].reshape(mask.shape)
def _preprocess(self, mask_file):
masks = {}
tbar = trange(len(self.ids))
print(
"Preprocessing mask, this will take a while."
+ "But don't worry, it only run once for each split."
)
for i in tbar:
img_id = self.ids[i]
mask = Image.fromarray(self._class_to_index(self.detail.getMask(img_id)))
masks[img_id["image_id"]] = mask
tbar.set_description("Preprocessing masks {}".format(img_id["image_id"]))
torch.save(masks, mask_file)
return masks
def __getitem__(self, index):
img_id = self.ids[index]
path = img_id["file_name"]
iid = img_id["image_id"]
img = Image.open(os.path.join(self.detail.img_folder, path)).convert("RGB")
if self.mode == "test":
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(path)
# convert mask to 60 categories
mask = self.masks[iid]
# synchrosized transform
if self.mode == "train":
img, mask = self._sync_transform(img, mask)
elif self.mode == "val":
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == "testval"
mask = self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
mask = self.target_transform(mask)
return img, mask
def _mask_transform(self, mask):
target = np.array(mask).astype("int32") - 1
return torch.from_numpy(target).long()
def __len__(self):
return len(self.ids)
@property
def pred_offset(self):
return 1
|
api-inference-community/tests/test_normalizers.py | mlonaws/huggingface_hub | 362 | 12735350 |
from unittest import TestCase
import torch
from api_inference_community.normalizers import speaker_diarization_normalize
class NormalizersTestCase(TestCase):
def test_speaker_diarization_dummy(self):
tensor = torch.zeros((10, 2))
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1"]
)
self.assertEqual(outputs, [])
def test_speaker_diarization(self):
tensor = torch.zeros((10, 2))
tensor[1:4, 0] = 1
tensor[3:8, 1] = 1
tensor[8:10, 0] = 1
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1"]
)
self.assertEqual(
outputs,
[
{"class": "SPEAKER_0", "start": 1 / 16000, "end": 4 / 16000},
{"class": "SPEAKER_1", "start": 3 / 16000, "end": 8 / 16000},
{"class": "SPEAKER_0", "start": 8 / 16000, "end": 10 / 16000},
],
)
def test_speaker_diarization_3_speakers(self):
tensor = torch.zeros((10, 3))
tensor[1:4, 0] = 1
tensor[3:8, 1] = 1
tensor[8:10, 2] = 1
with self.assertRaises(ValueError):
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1"]
)
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1", "SPEAKER_2"]
)
self.assertEqual(
outputs,
[
{"class": "SPEAKER_0", "start": 1 / 16000, "end": 4 / 16000},
{"class": "SPEAKER_1", "start": 3 / 16000, "end": 8 / 16000},
{"class": "SPEAKER_2", "start": 8 / 16000, "end": 10 / 16000},
],
)
|
components/test/data/autofill/automated_integration/task_flow.py | zealoussnow/chromium | 14,668 | 12735357 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome Autofill Task Flow
Execute a set of autofill tasks in a fresh ChromeDriver instance that has been
pre-loaded with some default profile.
Requires:
- Selenium python bindings
http://selenium-python.readthedocs.org/
- ChromeDriver
https://sites.google.com/a/chromium.org/chromedriver/downloads
The ChromeDriver executable must be available on the search PATH.
- Chrome
"""
import abc
from urlparse import urlparse
import os
import shutil
from random import choice
from string import ascii_lowercase
from selenium import webdriver
from selenium.common.exceptions import TimeoutException, WebDriverException
from selenium.webdriver.chrome.options import Options
class TaskFlow(object):
"""Represents an executable set of Autofill Tasks.
Attributes:
profile: Dict of profile data that acts as the master source for
validating autofill behaviour.
debug: Whether debug output should be printed (False if not specified).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, profile, debug=False):
self.set_profile(profile)
self._debug = debug
self._running = False
self._tasks = self._generate_task_sequence()
def set_profile(self, profile):
"""Validates |profile| before assigning it as the source of user data.
Args:
profile: Dict of profile data that acts as the master source for
validating autofill behaviour.
Raises:
ValueError: The |profile| dict provided is missing required keys
"""
if not isinstance(profile, dict):
      raise ValueError('profile must be a valid dictionary')
self._profile = profile
def run(self, user_data_dir, chrome_binary=None):
"""Generates and executes a sequence of chrome driver tasks.
Args:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
Raises:
RuntimeError: Running the TaskFlow was attempted while it's already
running.
Exception: Any failure encountered while running the tests
"""
if self._running:
raise RuntimeError('Cannot run TaskFlow when already running')
self._running = True
self._run_tasks(user_data_dir, chrome_binary=chrome_binary)
self._running = False
@abc.abstractmethod
def _generate_task_sequence(self):
"""Generates a set of executable tasks that will be run in ChromeDriver.
Note: Subclasses must implement this method.
Raises:
NotImplementedError: Subclass did not implement the method
Returns:
A list of AutofillTask instances that are to be run in ChromeDriver.
These tasks are to be run in order.
"""
raise NotImplementedError()
def _run_tasks(self, user_data_dir, chrome_binary=None):
"""Runs the internal set of tasks in a fresh ChromeDriver instance.
Args:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
Raises:
Exception: Any failure encountered while running the tests
"""
driver = self._get_driver(user_data_dir, chrome_binary=chrome_binary)
try:
for task in self._tasks:
task.run(driver)
finally:
driver.quit()
shutil.rmtree(self._profile_dir_dst)
def _get_driver(self, user_data_dir, profile_name=None, chrome_binary=None,
chromedriver_binary='chromedriver'):
"""Spin up a ChromeDriver instance that uses a given set of user data.
Generates a temporary profile data directory using a local set of test data.
Args:
user_data_dir: Path string for the writable directory in which profiles
should be stored.
profile_name: Name of the profile data directory to be created/used in
user_data_dir.
If None then an eight character name will be generated randomly.
This directory will be removed after the task flow completes.
chrome_binary: Path string to the Chrome binary that should be used by
ChromeDriver.
If None then it will use the PATH to find a binary.
Returns: The generated Chrome Driver instance.
"""
options = Options()
if profile_name is None:
profile_name = ''.join(choice(ascii_lowercase) for i in range(8))
options.add_argument('--profile-directory=%s' % profile_name)
full_path = os.path.realpath(__file__)
path, filename = os.path.split(full_path)
profile_dir_src = os.path.join(path, 'testdata', 'Default')
self._profile_dir_dst = os.path.join(user_data_dir, profile_name)
self._copy_tree(profile_dir_src, self._profile_dir_dst)
if chrome_binary is not None:
options.binary_location = chrome_binary
options.add_argument('--user-data-dir=%s' % user_data_dir)
options.add_argument('--show-autofill-type-predictions')
service_args = []
driver = webdriver.Chrome(executable_path=chromedriver_binary,
chrome_options=options,
service_args=service_args)
driver.set_page_load_timeout(15) # seconds
return driver
def _copy_tree(self, src, dst):
"""Recursively copy a directory tree.
If the destination directory does not exist then it will be created for you.
Doesn't overwrite newer existing files.
Args:
src: Path to the target source directory. It must exist.
dst: Path to the target destination directory. Permissions to create the
the directory (if necessary) and modify it's contents.
"""
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
src_item = os.path.join(src, item)
dst_item = os.path.join(dst, item)
if os.path.isdir(src_item):
self._copy_tree(src_item, dst_item)
elif (not os.path.exists(dst_item) or
os.stat(src_item).st_mtime - os.stat(dst_item).st_mtime > 1):
# Copy a file if it doesn't already exist, or if existing one is older.
shutil.copy2(src_item, dst_item)
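# Minimal illustrative sketch of a concrete subclass (hypothetical; FillAddressFormTask and
# the profile key below are placeholders, not part of this file):
#
#   class SingleSiteTaskFlow(TaskFlow):
#     def _generate_task_sequence(self):
#       return [FillAddressFormTask(self._profile, debug=self._debug)]
#
#   SingleSiteTaskFlow({'NAME_FULL': '<NAME>'}).run('/tmp/autofill_user_data')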
|
capstone/cite/templatetags/redaction.py | rachelaus/capstone | 134 | 12735370 | from django import template
register = template.Library()
@register.filter()
def redact(text, case):
return case.redact_obj(text)
@register.filter()
def elide(text, case):
    return case.elide_obj(text)
|
Python/OOP/Constructor.py | piovezan/SOpt | 148 | 12735371 | class A(object):
def A():
print('factory')
return A()
def __init__(self):
print('init')
def __call__(self):
print('call')
print('chamar o construtor')
a = A()
print('chamar o construtor e a função')
b = A()()
print('chamar a função')
c = A.A()
#https://pt.stackoverflow.com/q/109813/101
|
unittest/scripts/auto/py_shell/scripts/mysqlsh_module_norecord.py | mueller/mysql-shell | 119 | 12735375 |
#@<> Setup
testutil.deploy_sandbox(__mysql_sandbox_port1, "root")
#@<> Setup cluster
import mysqlsh
mydba = mysqlsh.connect_dba(__sandbox_uri1)
cluster = mydba.create_cluster("mycluster")
cluster.disconnect()
#@<> Catch error through mysqlsh.Error
try:
mydba.get_cluster("badcluster")
testutil.fail("<red>Function didn't throw exception as expected</red>")
except mysqlsh.Error as e:
EXPECT_EQ(51101, e.code)
except:
testutil.fail("<red>Function threw wrong exception</red>")
#@<> dba.session
mydba.session.run_sql("select 1")
#@<> DbError should be a subclass of Error
try:
mydba.session.run_sql("badquery")
testutil.fail("<red>Function didn't throw exception as expected</red>")
except mysqlsh.DBError as e:
EXPECT_EQ(mysql.ErrorCode.ER_PARSE_ERROR, e.code)
except:
testutil.fail("<red>Function threw wrong exception</red>")
try:
mydba.session.run_sql("badquery")
testutil.fail("<red>Function didn't throw exception as expected</red>")
except mysqlsh.Error as e:
EXPECT_EQ(mysql.ErrorCode.ER_PARSE_ERROR, e.code)
except:
testutil.fail("<red>Function threw wrong exception</red>")
#@<> Check for __qualname__ and __name__ in wrapped methods
EXPECT_EQ("Testutils.deploy_sandbox", testutil.deploy_sandbox.__qualname__)
EXPECT_EQ("Dba.create_cluster", dba.create_cluster.__qualname__)
EXPECT_EQ("deploy_sandbox", testutil.deploy_sandbox.__name__)
EXPECT_EQ("create_cluster", dba.create_cluster.__name__)
#@<> check that isatty exists (checking the return value depends on how the tests are ran)
sys.stdout.isatty()
sys.stdin.isatty()
sys.stderr.isatty()
#@<> Cleanup
mydba.session.close()
testutil.destroy_sandbox(__mysql_sandbox_port1)
|
migrations/normalize_user_course_ratings.py | noryb009/rmc | 164 | 12735404 |
import rmc.models as m
import rmc.shared.constants as c
import mongoengine as me
def normalize_user_course_ratings():
"""Normalize user course ratings to be 0/1 for Yes/No. Before it was
0.2,0.4,0.6,0.8.1.0 OR possible 0.0,0.25,0.5,0.75,1.0"""
num_changes = [0]
def normalize(value):
# Ranges to ignore are 0.5 to 0.6, add some epsilon just to be
# safe against float rounding
if value is None:
new_value = None
elif value < 0.45:
new_value = 0.0
elif value > 0.65:
new_value = 1.0
else:
new_value = None
if new_value != value:
num_changes[0] += 1
return new_value
for uc in m.UserCourse.objects:
if not m.Course.objects.with_id(uc.course_id):
print 'Skipping course %s' % uc.course_id
continue
cr = uc.course_review
pr = uc.professor_review
cr.interest = normalize(cr.interest)
cr.easiness = normalize(cr.easiness)
cr.usefulness = normalize(cr.usefulness)
pr.clarity = normalize(pr.clarity)
pr.passion = normalize(pr.passion)
uc.save()
print 'Updated %d reviews' % num_changes[0]
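# Illustrative effect of normalize() above (added example, not in the original migration):
#   0.0, 0.2, 0.25, 0.4 -> 0.0   (clear "No")
#   0.5, 0.6            -> None  (ambiguous middle band, dropped)
#   0.75, 0.8, 1.0      -> 1.0   (clear "Yes")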
if __name__ == '__main__':
me.connect(c.MONGO_DB_RMC)
normalize_user_course_ratings()
|
receiver/parse/data/carbon_pb2.py | dvanders/go-carbon | 722 | 12735417 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: carbon.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='carbon.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0c\x63\x61rbon.proto\")\n\x05Point\x12\x11\n\ttimestamp\x18\x01 \x01(\r\x12\r\n\x05value\x18\x02 \x01(\x01\"0\n\x06Metric\x12\x0e\n\x06metric\x18\x01 \x01(\t\x12\x16\n\x06points\x18\x02 \x03(\x0b\x32\x06.Point\"#\n\x07Payload\x12\x18\n\x07metrics\x18\x01 \x03(\x0b\x32\x07.Metricb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_POINT = _descriptor.Descriptor(
name='Point',
full_name='Point',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='Point.timestamp', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='Point.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=57,
)
_METRIC = _descriptor.Descriptor(
name='Metric',
full_name='Metric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metric', full_name='Metric.metric', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='points', full_name='Metric.points', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=107,
)
_PAYLOAD = _descriptor.Descriptor(
name='Payload',
full_name='Payload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metrics', full_name='Payload.metrics', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=144,
)
_METRIC.fields_by_name['points'].message_type = _POINT
_PAYLOAD.fields_by_name['metrics'].message_type = _METRIC
DESCRIPTOR.message_types_by_name['Point'] = _POINT
DESCRIPTOR.message_types_by_name['Metric'] = _METRIC
DESCRIPTOR.message_types_by_name['Payload'] = _PAYLOAD
Point = _reflection.GeneratedProtocolMessageType('Point', (_message.Message,), dict(
DESCRIPTOR = _POINT,
__module__ = 'carbon_pb2'
# @@protoc_insertion_point(class_scope:Point)
))
_sym_db.RegisterMessage(Point)
Metric = _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), dict(
DESCRIPTOR = _METRIC,
__module__ = 'carbon_pb2'
# @@protoc_insertion_point(class_scope:Metric)
))
_sym_db.RegisterMessage(Metric)
Payload = _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), dict(
DESCRIPTOR = _PAYLOAD,
__module__ = 'carbon_pb2'
# @@protoc_insertion_point(class_scope:Payload)
))
_sym_db.RegisterMessage(Payload)
# @@protoc_insertion_point(module_scope)
|
lsassy/dumpmethod/mirrordump.py | scopedsecurity/lsassy | 1,212 | 12735432 |
import logging
import os
import time
import base64
import random
import string
from lsassy.impacketfile import ImpacketFile
from lsassy.dumpmethod import IDumpMethod
class DumpMethod(IDumpMethod):
def __init__(self, session, timeout):
super().__init__(session, timeout)
self.mirrordump = "MirrorDump.exe"
self.mirrordump_path = False
self.mirrordump_remote_share = "C$"
self.mirrordump_remote_path = "\\Windows\\Temp\\"
self.mirrordump_uploaded = False
def prepare(self, options):
self.mirrordump = options.get("mirrordump", self.mirrordump)
self.mirrordump_path = options.get("mirrordump_path", self.mirrordump_path)
self.mirrordump_remote_share = options.get("mirrordump_remote_share", self.mirrordump_remote_share)
self.mirrordump_remote_path = options.get("mirrordump_remote_path", self.mirrordump_remote_path)
if not self.mirrordump_path:
logging.error("Missing mirrordump_path")
return None
if not os.path.exists(self.mirrordump_path):
logging.error("{} does not exist.".format(self.mirrordump_path))
return None
# Upload MirrorDump
logging.debug('Copy {} to {}'.format(self.mirrordump_path, self.mirrordump_remote_path))
with open(self.mirrordump_path, 'rb') as p:
try:
self._session.smb_session.putFile(self.mirrordump_remote_share, self.mirrordump_remote_path + self.mirrordump, p.read)
logging.success("MirrorDump successfully uploaded")
self.mirrordump_uploaded = True
return True
except Exception as e:
logging.error("MirrorDump upload error", exc_info=True)
return None
def clean(self):
if self.mirrordump_uploaded:
ImpacketFile.delete(self._session, self.mirrordump_remote_path + self.mirrordump, timeout=self._timeout)
def get_commands(self, dump_path=None, dump_name=None, no_powershell=False):
cmd_command = """{}{} -f {}{} -d {}""".format(
self.mirrordump_remote_path, self.mirrordump,
self.dump_path, self.dump_name,
''.join(random.choice(string.ascii_letters + string.digits) for _ in range(8)) + ".dll"
)
pwsh_command = cmd_command
return {
"cmd": cmd_command,
"pwsh": pwsh_command
}
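# Illustrative value of cmd_command (dump_path/dump_name come from IDumpMethod and the DLL
# name is random, so this is a placeholder, not a guaranteed output):
#   \Windows\Temp\MirrorDump.exe -f \Windows\Temp\lsass.dmp -d AbCd1234.dll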
|
src/api-service/__app__/onefuzzlib/versions.py | tonybaloney/onefuzz | 2,692 | 12735480 |
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
from typing import Dict
import semver
from memoization import cached
from onefuzztypes.responses import Version
from .__version__ import __version__
@cached
def read_local_file(filename: str) -> str:
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
if os.path.exists(path):
with open(path, "r") as handle:
return handle.read().strip()
else:
return "UNKNOWN"
def versions() -> Dict[str, Version]:
entry = Version(
git=read_local_file("git.version"),
build=read_local_file("build.id"),
version=__version__,
)
return {"onefuzz": entry}
def is_minimum_version(*, version: str, minimum: str) -> bool:
# check if version is at least (or higher) than minimum
return bool(semver.VersionInfo.parse(version).compare(minimum) >= 0)
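# Illustrative checks (added example, not part of the original module):
#   is_minimum_version(version="2.1.0", minimum="1.0.0") -> True
#   is_minimum_version(version="0.9.0", minimum="1.0.0") -> False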
|
incomplete/rasterizer/rasterizer/examples/__init__.py | adlerliu/500lines | 26,185 | 12735487 | import e1
import e2
import e3
import destijl
|
artemis/general/test_nondeterminism_hunting.py | peteroconnor-bc/artemis | 235 | 12735498 | import numpy as np
import pytest
from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, \
reset_variable_tracker
def _runs_are_the_same(var_gen_1, var_gen_2, use_assert = False):
delete_vars(['_test_random_var_32r5477w32'])
for run, gen in [(0, var_gen_1), (1, var_gen_2)]:
reset_variable_tracker()
for v in gen:
if use_assert:
assert_variable_matches_between_runs(v, '_test_random_var_32r5477w32')
else:
its_a_match=variable_matches_between_runs(v, '_test_random_var_32r5477w32')
if run==0:
assert its_a_match is None
else:
if not its_a_match:
return False
return True
def test_variable_matches_between_runs():
rng1 = np.random.RandomState(1234)
gen1 = (rng1.randn(3, 4) for _ in range(5))
rng2 = np.random.RandomState(1234)
gen2 = (rng2.randn(3, 4) for _ in range(5))
assert _runs_are_the_same(gen1, gen2)
rng = np.random.RandomState(1234)
gen1 = (rng.randn(3, 4) for _ in range(5))
gen2 = (rng.randn(3, 4) for _ in range(5))
assert not _runs_are_the_same(gen1, gen2)
gen1 = (i for i in range(5))
gen2 = (i for i in range(5))
assert _runs_are_the_same(gen1, gen2)
gen1 = (i for i in range(5))
gen2 = (i if i<4 else 7 for i in range(5))
assert not _runs_are_the_same(gen1, gen2)
def test_assert_variable_matches_between_runs():
rng1 = np.random.RandomState(1234)
gen1 = (rng1.randn(3, 4) for _ in range(5))
rng2 = np.random.RandomState(1234)
gen2 = (rng2.randn(3, 4) for _ in range(5))
_runs_are_the_same(gen1, gen2, use_assert=True)
rng = np.random.RandomState(1234)
gen1 = (rng.randn(3, 4) for _ in range(5))
gen2 = (rng.randn(3, 4) for _ in range(5))
with pytest.raises(AssertionError):
_runs_are_the_same(gen1, gen2, use_assert=True)
gen1 = (i for i in range(5))
gen2 = (i for i in range(5))
_runs_are_the_same(gen1, gen2, use_assert=True)
gen1 = (i for i in range(5))
gen2 = (i if i<4 else 7 for i in range(5))
with pytest.raises(AssertionError):
_runs_are_the_same(gen1, gen2, use_assert=True)
if __name__ == '__main__':
test_variable_matches_between_runs()
test_assert_variable_matches_between_runs()
|
PGGAN/utils.py | MingtaoGuo/DCGAN_WGAN_WGAN-GP_LSGAN_SNGAN_TensorFlow | 149 | 12735504 |
import scipy.io as sio
import numpy as np
def read_data(path):
for i in range(1, 6):
if i == 1:
data_mat = sio.loadmat(path + "data_batch_" + str(i) + ".mat")
data = np.transpose(np.reshape(data_mat["data"], [10000, 3, 32, 32]), [0, 2, 3, 1])
labels = data_mat["labels"]
else:
data_mat = sio.loadmat(path + "data_batch_" + str(i) + ".mat")
temp = np.transpose(np.reshape(data_mat["data"], [10000, 3, 32, 32]), [0, 2, 3, 1])
data = np.concatenate((temp, data), axis=0)
labels = np.concatenate((data_mat["labels"], labels), axis=0)
return data, labels
def get_batch(data, batchsize):
data_nums = data.shape[0]
rand_select = np.random.randint(0, data_nums, [batchsize])
batch = data[rand_select]
z = np.random.normal(0, 1, [batchsize, 512])
return batch, z
def read_face_data(path):
data = sio.loadmat(path)
return data["data"]
# a, b = read_data("./dataset/")
# a = 0
|
vel/rl/algo/distributional_dqn.py | galatolofederico/vel | 273 | 12735562 |
import torch
import torch.nn.utils
from vel.api import ModelFactory
from vel.api.metrics.averaging_metric import AveragingNamedMetric
from vel.rl.api import OptimizerAlgoBase
class DistributionalDeepQLearning(OptimizerAlgoBase):
""" Deep Q-Learning algorithm """
def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool,
target_update_frequency: int, max_grad_norm: float):
super().__init__(max_grad_norm)
self.model_factory = model_factory
self.discount_factor = discount_factor
self.double_dqn = double_dqn
self.target_update_frequency = target_update_frequency
self.target_model = None
self.vmin = None
self.vmax = None
self.num_atoms = None
self.support_atoms = None
self.atom_delta = None
def initialize(self, training_info, model, environment, device):
""" Initialize policy gradient from reinforcer settings """
self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device)
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval()
histogram_info = model.histogram_info()
self.vmin = histogram_info['vmin']
self.vmax = histogram_info['vmax']
self.num_atoms = histogram_info['num_atoms']
self.support_atoms = histogram_info['support_atoms']
self.atom_delta = histogram_info['atom_delta']
def calculate_gradient(self, batch_info, device, model, rollout):
""" Calculate loss of the supplied rollout """
evaluator = model.evaluate(rollout)
batch_size = rollout.frames()
dones_tensor = evaluator.get('rollout:dones')
rewards_tensor = evaluator.get('rollout:rewards')
assert dones_tensor.dtype == torch.float32
with torch.no_grad():
target_evaluator = self.target_model.evaluate(rollout)
if self.double_dqn:
# DOUBLE DQN
# Histogram gets returned as logits initially, we need to exp it before projection
target_value_histogram_for_all_actions = target_evaluator.get('model:q_dist_next').exp()
model_value_histogram_for_all_actions = evaluator.get('model:q_dist_next').exp()
atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms)
selected_action_indices = (
(atoms_aligned * model_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1)
)
# Select largest 'target' value based on action that 'model' selects
next_value_histograms = (
target_value_histogram_for_all_actions[range(batch_size), selected_action_indices]
)
else:
# REGULAR DQN
# Histogram gets returned as logits initially, we need to exp it before projection
target_value_histogram_for_all_actions = target_evaluator.get('model:q_dist_next').exp()
atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms)
selected_action_indices = (
(atoms_aligned * target_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1)
)
next_value_histograms = (
target_value_histogram_for_all_actions[range(batch_size), selected_action_indices]
)
# HISTOGRAM PROJECTION CODE
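        # The block below is the categorical (C51-style) projection of the Bellman target
        # onto the fixed support: each atom z_j is mapped to Tz_j = r + gamma^n * (1 - done) * z_j,
        # clamped to [vmin, vmax], and its probability mass is split linearly between the two
        # nearest support atoms (index_floor / index_ceil).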
forward_steps = rollout.extra_data.get('forward_steps', 1)
atoms_projected = (
rewards_tensor.unsqueeze(1) +
(self.discount_factor ** forward_steps) *
(1 - dones_tensor).unsqueeze(1) * self.support_atoms.unsqueeze(0)
)
atoms_projected = atoms_projected.clamp(min=self.vmin, max=self.vmax)
projection_indices = (atoms_projected - self.vmin) / self.atom_delta
index_floor = projection_indices.floor().long()
index_ceil = projection_indices.ceil().long()
# Fix corner case when index_floor == index_ceil
index_floor[(index_ceil > 0) * (index_floor == index_ceil)] -= 1
index_ceil[(index_floor < (self.num_atoms - 1)) * (index_floor == index_ceil)] += 1
value_histogram_projected = torch.zeros_like(next_value_histograms)
# Following part will be a bit convoluted, in an effort to fully vectorize projection operation
# Special offset index tensor
offsets = (
torch.arange(0, batch_size * self.num_atoms, self.num_atoms)
.unsqueeze(1)
.expand(batch_size, self.num_atoms)
.contiguous().view(-1).to(device)
)
# Linearize all the buffers
value_histogram_projected = value_histogram_projected.view(-1)
index_ceil = index_ceil.view(-1)
index_floor = index_floor.view(-1)
projection_indices = projection_indices.view(-1)
value_histogram_projected.index_add_(
0,
index_floor+offsets,
(next_value_histograms.view(-1) * (index_ceil.float() - projection_indices))
)
value_histogram_projected.index_add_(
0,
index_ceil+offsets,
(next_value_histograms.view(-1) * (projection_indices - index_floor.float()))
)
value_histogram_projected = value_histogram_projected.reshape(next_value_histograms.shape)
q_log_histogram_selected = evaluator.get('model:action:q_dist')
# Cross-entropy loss as usual
original_losses = -(value_histogram_projected * q_log_histogram_selected).sum(dim=1)
if evaluator.is_provided('rollout:weights'):
weights = evaluator.get('rollout:weights')
else:
weights = torch.ones_like(rewards_tensor)
loss_value = torch.mean(weights * original_losses)
loss_value.backward()
with torch.no_grad():
mean_q_model = (self.support_atoms.unsqueeze(0) * torch.exp(q_log_histogram_selected)).sum(dim=1).mean()
mean_q_target = (self.support_atoms.unsqueeze(0) * value_histogram_projected).sum(dim=1).mean()
return {
'loss': loss_value.item(),
# We need it to update priorities in the replay buffer:
'errors': original_losses.detach().cpu().numpy(),
'average_q_selected': mean_q_model.item(),
'average_q_target': mean_q_target.item()
}
def post_optimization_step(self, batch_info, device, model, rollout):
""" Steps to take after optimization has been done"""
if batch_info.aggregate_batch_number % self.target_update_frequency == 0:
self.target_model.load_state_dict(model.state_dict())
self.target_model.eval()
def metrics(self) -> list:
""" List of metrics to track for this learning process """
return [
AveragingNamedMetric("loss"),
AveragingNamedMetric("average_q_selected"),
AveragingNamedMetric("average_q_target"),
AveragingNamedMetric("grad_norm"),
]
def create(model: ModelFactory, discount_factor: float, target_update_frequency: int,
max_grad_norm: float, double_dqn: bool = False):
""" Vel factory function """
return DistributionalDeepQLearning(
model_factory=model,
discount_factor=discount_factor,
double_dqn=double_dqn,
target_update_frequency=target_update_frequency,
max_grad_norm=max_grad_norm
)
|
LeetCode/0371_sum_of_two_integers.py | LenartBucar/PythonAlgorithms | 144 | 12735629 | class Solution:
def getSum(self, a: int, b: int) -> int:
# 32 bits integer max and min
MAX = 0x7FFFFFFF
MIN = 0x80000000
mask = 0xFFFFFFFF
while b != 0:
carry = a & b
a, b = (a ^ b) & mask, (carry << 1) & mask
        return a if a <= MAX else ~(a ^ mask)
|
src/show_results.py | jimkon/Deep-Reinforcement-Learning-in-Large-Discrete-Action-Spaces | 154 | 12735635 |
#!/usr/bin/python3
import numpy as np
from util.data_process import *
def show():
folder = 'saved/'
episodes = 10000
actions = 100
k = 10
experiment = 'InvertedPendulum-v1'
v = 3
id = 0
name = 'results/obj/{}data_{}_Wolp{}_{}{}k{}#{}.json.zip'.format(folder,
episodes,
v,
experiment[:3],
actions,
k,
id
)
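    # With the hard-coded values above this resolves to:
    #   results/obj/saved/data_10000_Wolp3_Inv100k10#0.json.zip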
data_process = Data_handler(name)
print("Data file is loaded")
data_process.plot_rewards()
data_process.plot_average_reward()
data_process.plot_action_distribution()
data_process.plot_action_distribution_over_time()
data_process.plot_action_error()
if __name__ == '__main__':
show()
|
alipay/aop/api/response/AlipayIserviceCcmInstanceGetResponse.py | antopen/alipay-sdk-python-all | 213 | 12735645 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayIserviceCcmInstanceGetResponse(AlipayResponse):
def __init__(self):
super(AlipayIserviceCcmInstanceGetResponse, self).__init__()
self._create_time = None
self._creator_id = None
self._description = None
self._external_id = None
self._id = None
self._name = None
self._status = None
self._update_time = None
self._updater_id = None
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def creator_id(self):
return self._creator_id
@creator_id.setter
def creator_id(self, value):
self._creator_id = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def external_id(self):
return self._external_id
@external_id.setter
def external_id(self, value):
self._external_id = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def update_time(self):
return self._update_time
@update_time.setter
def update_time(self, value):
self._update_time = value
@property
def updater_id(self):
return self._updater_id
@updater_id.setter
def updater_id(self, value):
self._updater_id = value
def parse_response_content(self, response_content):
response = super(AlipayIserviceCcmInstanceGetResponse, self).parse_response_content(response_content)
if 'create_time' in response:
self.create_time = response['create_time']
if 'creator_id' in response:
self.creator_id = response['creator_id']
if 'description' in response:
self.description = response['description']
if 'external_id' in response:
self.external_id = response['external_id']
if 'id' in response:
self.id = response['id']
if 'name' in response:
self.name = response['name']
if 'status' in response:
self.status = response['status']
if 'update_time' in response:
self.update_time = response['update_time']
if 'updater_id' in response:
self.updater_id = response['updater_id']
|
pipsi/scripts/find_scripts.py | mitsuhiko/pipsi | 1,841 | 12735661 |
import os
import sys
import pkg_resources
pkg = sys.argv[1]
prefix = sys.argv[2]
dist = pkg_resources.get_distribution(pkg)
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
print(os.path.join(dist.location, line.split(',')[0]))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
print(os.path.join(dist.egg_info, line.split(',')[0]))
elif dist.has_metadata('entry_points.txt'):
try:
from ConfigParser import SafeConfigParser
from StringIO import StringIO
except ImportError:
from configparser import SafeConfigParser
from io import StringIO
parser = SafeConfigParser()
parser.readfp(StringIO(
'\n'.join(dist.get_metadata_lines('entry_points.txt'))))
if parser.has_section('console_scripts'):
for name, _ in parser.items('console_scripts'):
print(os.path.join(prefix, name))
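# Illustrative invocation (placeholder package name and prefix, not part of the original script):
#   python find_scripts.py requests /home/user/.local/venvs/requests
# This prints the files recorded for the installed package (or its console-script paths).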
|
core/visualize/visualizer.py | hyunynim/DIST-Renderer | 176 | 12735681 | import os, sys
import time
import torch
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal
import copy
import cv2
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image as pil
import pickle
def print_loss_pack(loss_pack, name):
loss_depth, loss_mask_gt, loss_mask_out, loss_normal, loss_l2reg = loss_pack['depth'], loss_pack['mask_gt'], loss_pack['mask_out'], loss_pack['normal'], loss_pack['l2reg']
if len(loss_depth.shape) == 1:
loss_mask_gt, loss_mask_out, loss_depth, loss_normal, loss_l2reg = loss_mask_gt.mean(), loss_mask_out.mean(), loss_depth.mean(), loss_normal.mean(), loss_l2reg.mean()
print('NAME = [{0}] -- loss_depth: {1:.4f}, loss_mask_gt: {2:.4f}, loss_mask_out: {3:.4f}, loss_normal: {4:.4f}, loss_l2reg: {5:.4f}'.format(name, loss_depth.detach().cpu().numpy(), loss_mask_gt.detach().cpu().numpy(), loss_mask_out.detach().cpu().numpy(), loss_normal.detach().cpu().numpy(), loss_l2reg.detach().cpu().numpy()))
def print_loss_pack_color(loss_pack, name):
loss_color, loss_depth, loss_mask_gt, loss_mask_out, loss_normal, loss_l2reg, loss_l2reg_c = loss_pack['color'], loss_pack['depth'], loss_pack['mask_gt'], loss_pack['mask_out'], loss_pack['normal'], loss_pack['l2reg'], loss_pack['l2reg_c']
print('NAME = [{0}] -- loss_color: {1:.4f}, loss_depth: {2:.4f}, loss_mask_gt: {3:.4f}, loss_mask_out: {4:.4f}, loss_normal: {5:.4f}, loss_l2reg: {6:.4f}, loss_l2re_cg: {7:.4f}'.format(name, loss_color.detach().cpu().numpy(), loss_depth.detach().cpu().numpy(), loss_mask_gt.detach().cpu().numpy(), loss_mask_out.detach().cpu().numpy(), loss_normal.detach().cpu().numpy(), loss_l2reg.detach().cpu().numpy(), loss_l2reg_c.detach().cpu().numpy()))
def demo_color_save_render_output(prefix, sdf_renderer, shape_code, color_code, camera, lighting_loc=None, profile=False):
R, T = camera.extrinsic[:,:3], camera.extrinsic[:,3]
R, T = torch.from_numpy(R).float().cuda(), torch.from_numpy(T).float().cuda()
R.requires_grad, T.requires_grad = False, False
if lighting_loc is not None:
lighting_locations = torch.from_numpy(lighting_loc).float().unsqueeze(0).cuda()
else:
lighting_locations = None
render_output = sdf_renderer.render(color_code, shape_code, R, T, profile=profile, no_grad=True, lighting_locations=lighting_locations)
depth_rendered, normal_rendered, color_rgb, valid_mask_rendered, min_sdf_sample = render_output
data = {}
data['depth'] = depth_rendered.detach().cpu().numpy()
data['normal'] = normal_rendered.detach().cpu().numpy()
data['mask'] = valid_mask_rendered.detach().cpu().numpy()
data['color'] = color_rgb.detach().cpu().numpy()
data['min_sdf_sample'] = min_sdf_sample.detach().cpu().numpy()
data['latent_tensor'] = shape_code.detach().cpu().numpy()
data['K'] = sdf_renderer.get_intrinsic()
data['RT'] = torch.cat([R, T[:,None]], 1).detach().cpu().numpy()
fname = prefix + '_info.pkl'
with open(fname, 'wb') as f:
pickle.dump(data, f)
img_hw = sdf_renderer.get_img_hw()
visualizer = Visualizer(img_hw)
print('Writing to prefix: {}'.format(prefix))
visualizer.visualize_depth(prefix + '_depth.png', depth_rendered.detach().cpu().numpy(), valid_mask_rendered.detach().cpu().numpy())
visualizer.visualize_normal(prefix + '_normal.png', normal_rendered.detach().cpu().numpy(), valid_mask_rendered.detach().cpu().numpy(), bgr2rgb=True)
visualizer.visualize_mask(prefix + '_silhouette.png', valid_mask_rendered.detach().cpu().numpy())
cv2.imwrite(prefix + '_rendered_rgb.png', color_rgb.detach().cpu().numpy() * 255)
class Visualizer(object):
def __init__(self, img_hw, dmin=0.0, dmax=10.0):
self.img_h, self.img_w = img_hw[0], img_hw[1]
self.data = {}
self.dmin, self.dmax = dmin, dmax
self.loss_counter = 0
self.loss_curve = {}
self.loss_list = []
self.chamfer_list = []
def get_data(self, data_name):
if data_name in self.data.keys():
return self.data[data_name]
else:
raise ValueError('Key {0} does not exist.'.format(data_name))
def set_data(self, data):
self.data = data
def reset_data(self):
self.data = {}
keys = ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out',
'depth_gt', 'depth_output', 'loss_depth',
'normal_gt', 'normal_output', 'loss_normal']
for key in keys:
self.data[key] = np.zeros((64, 64))
def reset_loss_curve(self):
self.loss_counter = 0
self.loss_curve = {}
def reset_all(self):
self.reset_data()
self.reset_loss_curve()
def add_loss_from_pack(self, loss_pack):
'''
potential properties:
['mask_gt', 'mask_out', 'depth' 'normal', 'l2reg']
'''
loss_name_list = list(loss_pack.keys())
if self.loss_curve == {}:
for loss_name in loss_name_list:
self.loss_curve[loss_name] = []
for loss_name in loss_name_list:
loss_value = loss_pack[loss_name].detach().cpu().numpy()
self.loss_curve[loss_name].append(loss_value)
self.loss_counter = self.loss_counter + 1
def add_loss(self, loss):
self.loss_list.append(loss.detach().cpu().numpy())
def add_chamfer(self, chamfer):
self.chamfer_list.append(chamfer)
def add_data(self, data_name, data_src, data_mask=None):
'''
potential properties:
mask: ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out']
depth: ['depth_gt', 'depth_output', 'loss_depth']
normal: ['normal_gt', 'normal_output', 'loss_normal']
'''
if data_mask is None:
self.data[data_name] = data_src
else:
data_map = np.zeros(data_mask.shape)
data_map[data_mask != 0] = data_src
self.data[data_name] = data_map
def save_depth(self, fname, depth_vis, cmap='magma', direct=False):
if direct:
cv2.imwrite(fname, depth_vis)
return 0
vmin, vmax = 0, 255
normalizer = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap=cmap)
colormapped_im = (mapper.to_rgba(depth_vis)[:,:,:3] * 255).astype(np.uint8)
im = pil.fromarray(colormapped_im)
im.save(fname)
def save_mask(self, fname, mask_vis, bgr2rgb=False):
if bgr2rgb:
mask_vis = cv2.cvtColor(mask_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, mask_vis)
def save_normal(self, fname, normal_vis, bgr2rgb=False):
if bgr2rgb:
normal_vis = cv2.cvtColor(normal_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, normal_vis)
def save_error(self, fname, error_vis, bgr2rgb=False):
self.save_depth(fname, error_vis, cmap='jet')
def visualize_depth(self, fname, depth, mask=None):
# depth_vis = get_vis_depth(depth, mask=mask, dmin=self.dmin, dmax=self.dmax)
depth_vis = get_vis_depth(depth, mask=mask)
# self.save_depth(fname, depth_vis)
cv2.imwrite(fname, depth_vis)
def visualize_normal(self, fname, normal, mask=None, bgr2rgb=False):
normal_vis = get_vis_normal(normal, mask=mask)
if bgr2rgb:
normal_vis = cv2.cvtColor(normal_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, normal_vis)
def visualize_mask(self, fname, mask, bgr2rgb=False):
mask_vis = get_vis_mask(mask)
if bgr2rgb:
mask_vis = cv2.cvtColor(mask_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, mask_vis)
def imshow(self, ax, img, title=None):
ax.imshow(img)
ax.axis('off')
if title is not None:
ax.set_title(title)
def imshow_bgr2rgb(self, ax, img, title=None):
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
ax.imshow(img)
ax.axis('off')
if title is not None:
ax.set_title(title)
def show_loss_curve(self, fname):
pass
def show_all_data_3x4(self, fname):
fig, axs = plt.subplots(3, 4, figsize=(30,30))
# first row, groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[0, 0], 255 - depth_gt_vis, title='depth gt')
normal_gt_vis = get_vis_normal(self.data['normal_gt'], mask=self.data['mask_gt'])
self.imshow(axs[0, 1], normal_gt_vis, title='normal gt')
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.imshow_bgr2rgb(axs[0, 2], 255 - mask_gt_vis, title='mask gt')
axs[0, 3].axis('off')
# second row, output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[1, 0], 255 - depth_output_vis, title='depth output')
normal_output_vis = get_vis_normal(self.data['normal_output'], mask=self.data['mask_output'])
self.imshow(axs[1, 1], normal_output_vis, title='normal output')
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.imshow_bgr2rgb(axs[1, 2], 255 - mask_output_vis, title='mask output')
axs[1, 3].axis('off')
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5)
self.imshow_bgr2rgb(axs[2, 0], 255 - loss_depth_vis, title='depth loss')
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)
self.imshow_bgr2rgb(axs[2, 1], 255 - loss_normal_vis, title='normal loss')
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt']) > 0)
        self.imshow_bgr2rgb(axs[2, 2], 255 - loss_mask_gt_vis, title='gt \\ output')
        loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out']) > 0)
        self.imshow_bgr2rgb(axs[2, 3], 255 - loss_mask_out_vis, title='output \\ gt')
# savefig
fig.savefig(fname)
plt.close('all')
def save_all_data(self, prefix):
# groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.save_depth(prefix + '_depth_gt.png', depth_gt_vis, cmap='magma', direct=True)
normal_gt_vis = get_vis_normal(self.data['normal_gt'], mask=self.data['mask_gt'])
self.save_normal(prefix + '_normal_gt.png', normal_gt_vis, bgr2rgb=True)
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.save_mask(prefix + '_mask_gt.png', mask_gt_vis)
# output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.save_depth(prefix + '_depth_output.png', depth_output_vis, cmap='magma', direct=True)
normal_output_vis = get_vis_normal(self.data['normal_output'], mask=self.data['mask_output'])
self.save_normal(prefix + '_normal_output.png', normal_output_vis, bgr2rgb=True)
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.save_mask(prefix + '_mask_output.png', mask_output_vis)
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5, bg_color=0)
self.save_error(prefix + '_depth_loss.png', loss_depth_vis, bgr2rgb=True)
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0, bg_color=0)
self.save_error(prefix + '_normal_loss.png', loss_normal_vis, bgr2rgb=True)
loss_mask_gt_vis = get_vis_depth(np.abs(self.data['loss_mask_gt']), bg_color=0)
self.save_error(prefix + '_mask_gt_loss.png', loss_mask_gt_vis, bgr2rgb=True)
loss_mask_out_vis = get_vis_depth(np.abs(self.data['loss_mask_out']), bg_color=0)
self.save_error(prefix + '_mask_out_loss.png', loss_mask_out_vis, bgr2rgb=True)
self.save_error(prefix + '_mask_loss.png', loss_mask_gt_vis + loss_mask_out_vis, bgr2rgb=True)
def dump_all_data(self, fname):
with open(fname, 'wb') as f:
pickle.dump({'data': self.data, 'loss_curve': self.loss_curve, 'loss_list': self.loss_list, 'chamfer_list': self.chamfer_list}, f)
def show_all_data(self, fname):
self.show_all_data_3x4(fname)
# self.save_all_data(fname[:-4])
def show_all_data_color(self, fname):
fig, axs = plt.subplots(3, 4, figsize=(30,30))
# first row, groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[0, 0], depth_gt_vis, title='depth gt')
normal_gt_vis = get_vis_normal(self.data['normal_gt'])
self.imshow_bgr2rgb(axs[0, 1], normal_gt_vis, title='normal gt')
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.imshow_bgr2rgb(axs[0, 2], mask_gt_vis, title='mask gt')
self.imshow_bgr2rgb(axs[0, 3], self.data['color_gt'], title='rgb gt')
# second row, output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[1, 0], depth_output_vis, title='depth output')
normal_output_vis = get_vis_normal(self.data['normal_output'])
self.imshow_bgr2rgb(axs[1, 1], normal_output_vis, title='normal output')
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.imshow_bgr2rgb(axs[1, 2], mask_output_vis, title='mask output')
self.imshow_bgr2rgb(axs[1, 3], self.data['color_output'], title='rgb output')
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5)
self.imshow_bgr2rgb(axs[2, 0], loss_depth_vis, title='depth loss')
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)
self.imshow_bgr2rgb(axs[2, 1], loss_normal_vis, title='normal loss')
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt']) > 0)
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out']) > 0)
loss_mask_gt_vis += loss_mask_out_vis
self.imshow_bgr2rgb(axs[2, 2], loss_mask_gt_vis, title='mask loss')
self.imshow_bgr2rgb(axs[2, 3], self.data['loss_color'], title='rgb loss')
# savefig
fig.savefig(fname)
plt.close('all')
def return_output_data_color(self):
return self.data['color_output'], self.data['depth_output'], self.data['normal_output'], self.data['mask_output']
def show_all_data_color_multi(self, fname, num_img=4):
fig, axs = plt.subplots(3, 2*num_img, figsize=(8*2*num_img,25))
for i in range(num_img):
# first row, ground truth
self.imshow_bgr2rgb(axs[0, 2*i], self.data['color_gt-{}'.format(i)], title='rgb gt {}'.format(i))
mask_gt_vis = get_vis_mask(self.data['mask_gt-{}'.format(i)])
self.imshow_bgr2rgb(axs[0, 2*i+1], mask_gt_vis, title='mask gt {}'.format(i))
# second row, output
self.imshow_bgr2rgb(axs[1, 2*i], self.data['color_output-{}'.format(i)], title='rgb output {}'.format(i))
mask_output_vis = get_vis_mask(self.data['mask_output-{}'.format(i)])
self.imshow_bgr2rgb(axs[1, 2*i+1], mask_output_vis, title='mask output {}'.format(i))
# third row, loss
self.imshow_bgr2rgb(axs[2, 2*i], self.data['loss_color-{}'.format(i)], title='rgb loss {}'.format(i))
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt-{}'.format(i)]) > 0)
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out-{}'.format(i)]) > 0)
loss_mask_gt_vis += loss_mask_out_vis
self.imshow_bgr2rgb(axs[2, 2*i+1], loss_mask_gt_vis, title='mask loss {}'.format(i))
# savefig
plt.subplots_adjust(top=0.95, right=0.99, left=0.01, bottom=0.01, wspace=0.05, hspace=0.1)
fig.savefig(fname)
plt.close('all')
def show_all_data_color_warp(self, fname):
fig, axs = plt.subplots(1, 5, figsize=(15, 3.4))
self.imshow_bgr2rgb(axs[0], self.data['color_gt-1'], title='view 1')
self.imshow_bgr2rgb(axs[1], self.data['color_gt-2'], title='view 2')
self.imshow_bgr2rgb(axs[2], self.data['color_valid-1'], title='valid region in view 1')
self.imshow_bgr2rgb(axs[3], self.data['color_valid-2'], title='warped color from view 2')
self.imshow_bgr2rgb(axs[4], self.data['color_valid_loss'], title='color loss')
# savefig
plt.subplots_adjust(top=0.99, right=0.99, left=0.01, bottom=0.00, wspace=0.05, hspace=0)
fig.savefig(fname)
plt.close('all')
|
main.py | vasujain/wfh-ninja | 196 | 12735702 | from flask import *
from flask.json import JSONEncoder
from flask.ext.cors import CORS
from flask.ext.login import LoginManager, login_user , logout_user , current_user , login_required
from werkzeug.contrib.fixers import ProxyFix
import simplejson as json
import os, sys
import datetime
app = Flask(__name__, static_url_path='/static')
sess = Session()
app.config.from_object('config')
if app.config['SQLALCHEMY_DATABASE_URI'] == None:
print "Need database config"
sys.exit(1)
from models import db, Quote, Vote, User
db.init_app(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@app.before_request
def before_request():
g.user = current_user
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
# registers user
@app.route('/register', methods = ['POST'])
def register():
body = request.get_json()
print app.config
if 'secret' not in body or body['secret'] != app.config['ADMIN_REGISTRATION_SECRET_KEY']:
return jsonify({"Error": "Secret key is wrong"})
email = body['email']
password = body['password']
user = User(email=email, password=password)
db.session.add(user)
db.session.commit()
return jsonify(user.serialize)
# renders login page
@app.route('/login', methods=['GET'])
def render_login():
return app.send_static_file('login.html')
@app.route('/', methods=['GET'])
def render_index():
return app.send_static_file('index.html')
# user login
@app.route('/login', methods = ['POST'])
def login():
body = request.get_json()
if body:
email = body['email']
password = body['password']
else:
email = request.form.get('email')
password = request.form.get('password')
registered_user = User.query.filter_by(email=email,password=password).first()
if registered_user is None:
return jsonify({"Error": "Email or Password invalid"})
login_user(registered_user)
return redirect("/admin", code=302)
# renders admin page
@app.route('/admin', methods=['GET'])
def render_admin():
if current_user.is_authenticated() is False:
return redirect("/login", code=302)
return app.send_static_file('admin.html')
# user logout
@app.route('/logout', methods = ['GET'])
def logout():
logout_user()
return redirect("/login", code=302)
# renders summary page
@app.route('/summary', methods=['GET'])
def render_summary():
return app.send_static_file('summary.html')
# get all quotes
@app.route("/quote", methods = ['GET'])
def get_quote():
results = {}
    if current_user.is_authenticated() is True and request.args.get('all') == "true":
result = Quote.query.all()
for item in result:
results[item.id] = item.serialize
else:
# if user is not authenticated, return only quotes that are approved
result = Quote.query.filter(Quote.active==True).all()
for item in result:
results[item.id] = item.serialize
scores = db.session.query(Vote.quote_id, db.func.sum(Vote.value).label("score")).group_by(Vote.quote_id).join(Quote).filter(Quote.id.in_(results.keys())).all()
for i in scores:
results[i[0]]["score"] = i[1]
return jsonify(results)
# gets details of single quote
@app.route("/quote/<int:id>", methods = ['GET'])
def get_single_quote(id):
quote = Quote.query.get(id)
quote.view_count += 1
quote_score = db.session.query(db.func.sum(Vote.value)).group_by(Vote.quote_id).filter(Vote.quote_id==id).all()
db.session.commit()
quote = quote.serialize
quote["score"] = quote_score[0][0]
return jsonify(quote)
# submits a new quote
@app.route("/quote", methods = ['POST'])
def post_new_quote():
body = request.get_json()
conditions = {}
if "conditions" in body:
conditions = body['conditions']
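    # Behind a proxy, X-Forwarded-For may hold a comma-separated chain of addresses;
    # keep only the left-most entry, which is the originating client.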
ip = request.environ.get('HTTP_X_FORWARDED_FOR', request.remote_addr)
ip = ip.partition(',')[0]
quote = Quote(text = body['text'], conditions = json.dumps(conditions), view_count = 1, ip = ip, active = False)
db.session.add(quote)
db.session.commit()
vote = Vote(ip = ip, value = 1, quote_id = quote.id) #auto upvote every new quote by 1
db.session.add(vote)
db.session.commit()
return jsonify(quote.serialize)
# submits a new vote for a single quote
@app.route("/quote/<int:quote_id>/vote", methods = ['POST'])
def post_new_vote(quote_id):
body = request.get_json()
ip = request.environ.get('HTTP_X_FORWARDED_FOR', request.remote_addr)
ip = ip.partition(',')[0]
vote = Vote(ip = ip, value = body['value'], quote_id = quote_id)
db.session.add(vote)
db.session.commit()
return jsonify(vote.serialize)
# approves/ activates a single quote
@app.route("/quote/<int:id>/approve", methods = ['PUT'])
@login_required
def approve_quote(id):
quote = Quote.query.get(id)
quote.active = True
db.session.commit()
return jsonify(quote.serialize)
# unapproves/ rejects a single quote
@app.route("/quote/<int:id>/reject", methods = ['PUT'])
@login_required
def reject_quote(id):
quote = Quote.query.get(id)
quote.active = False
db.session.commit()
return jsonify(quote.serialize)
# deletes a single quote
@app.route("/quote/<int:id>", methods = ['DELETE'])
@login_required
def delete_quote(id):
vote = Vote.query.filter_by(quote_id = id).all()
quote = Quote.query.filter_by(id = id).all()
if quote == []:
return jsonify({"Error":"Quote does not exist"})
for v in vote:
db.session.delete(v)
db.session.commit()
for q in quote:
db.session.delete(q)
db.session.commit()
return jsonify({"Success":"Quote has been deleted"})
cors = CORS(app)
if __name__ == "__main__":
# app.debug = True #uncomment to run debug mode
app.run()
|
examples/example-protobuf-client.py | hessu/carbon | 961 | 12735731 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" Copyright 2013 <NAME>
Copyright 2017 The Graphite Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Import the precompiled protobuffer. It can be recompiled with:
# $ protoc --python_out=. carbon.proto
from carbon.carbon_pb2 import Payload
import os
import sys
import time
import socket
import struct
CARBON_SERVER = '127.0.0.1'
CARBON_PROTOBUF_PORT = 2005
DELAY = 60
def run(sock, delay):
"""Make the client go go go"""
while True:
# Epoch, timestamp in seconds since 1970
now = int(time.time())
# Initialize the protobuf payload
payload_pb = Payload()
labels = ['1min', '5min', '15min']
for name, value in zip(labels, os.getloadavg()):
m = payload_pb.metrics.add()
m.metric = 'system.loadavg_' + name
p = m.points.add()
p.timestamp = now
p.value = value
print("sending message")
print(('-' * 80))
print(payload_pb)
package = payload_pb.SerializeToString()
# The message must be prepended with its size
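        # ('!L' packs the length as a big-endian unsigned 32-bit integer, so the receiver
        # can read a fixed 4-byte header before the variable-length protobuf payload.)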
size = struct.pack('!L', len(package))
sock.sendall(size)
# Then send the actual payload
sock.sendall(package)
time.sleep(delay)
def main():
"""Wrap it all up together"""
delay = DELAY
if len(sys.argv) > 1:
arg = sys.argv[1]
if arg.isdigit():
delay = int(arg)
else:
sys.stderr.write(
"Ignoring non-integer argument. Using default: %ss\n"
% delay)
sock = socket.socket()
try:
sock.connect((CARBON_SERVER, CARBON_PROTOBUF_PORT))
except socket.error:
raise SystemExit("Couldn't connect to %(server)s on port %(port)d, "
"is carbon-cache.py running?" %
{'server': CARBON_SERVER,
'port': CARBON_PROTOBUF_PORT})
try:
run(sock, delay)
except KeyboardInterrupt:
sys.stderr.write("\nExiting on CTRL-c\n")
sys.exit(0)
if __name__ == "__main__":
main()
|
pylayers/antprop/examples/ex_vsh3.py | usmanwardag/pylayers | 143 | 12735751 | from pylayers.antprop.antenna import *
from numpy import *
from matplotlib.pylab import *
kf = 30
A = Antenna('S2R3.vsh3','ant')
phi = linspace(0,2*pi,180)
theta=array([1.57])
Fth,Fph = A.pattern(theta,phi)
polar(phi,abs(Fth[kf,0,:]),phi,abs(Fph[kf,0,:]))
B = Antenna('S2R3.mat','ant/UWBAN/Matfile')
polar(B.phi,abs(B.Ftheta[kf,45,:]),B.phi,abs(B.Fphi[kf,45,:]))
legend((u'$F_{\\theta}^{vsh}$',u'$F_{\phi}^{vsh}$',u'$F_{\\theta}^{original}$',u'$F_{\phi}^{original}$'),loc= 'best')
t=title('$\\theta=\\frac{\pi}{2}$'+', f = '+str(A.fa[kf])[0:6]+ ' GHz')
t.set_fontsize(18)
savefig('polarvsh3.png')
show()
|
daisy_workflows/image_import/suse/suse_import/on_demand/validate_chroot.py | zoran15/compute-image-tools | 186 | 12735828 | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure directories and files are staged in the chrooted guest to match
what's expected by run_in_chroot.sh
Benefits:
1. Avoid symlinks in the chroot that point outside of it.
2. Provide useful error messages prior to running chroot script.
"""
import logging
from pathlib import Path
import typing
def is_file(expected_file: Path, substring='') -> typing.List[str]:
"""Assert that expected_file exists. If substring is provided,
assert that the file contains it.
Returns:
A list of errors found, or empty if successful.
"""
logger = logging.getLogger('_is_file {}'.format(expected_file))
errs = []
try:
actual_content = expected_file.read_text()
logger.debug('content: {}'.format(substring))
if substring and substring not in actual_content:
errs.append('{content} not found in {fname}'.format(
content=substring, fname=expected_file))
except BaseException as e:
logger.debug(e)
errs.append('File not found: {}'.format(expected_file))
return errs
def is_non_empty_dir(expected_dir: Path) -> typing.List[str]:
"""Assert that directory exists, and that it's not empty.
Returns:
A list of errors found, or empty if successful.
"""
errs = []
if expected_dir.is_dir():
for child in expected_dir.iterdir():
if child.is_file() or (
child.is_dir() and len(is_non_empty_dir(child)) == 0):
return errs
errs.append('Directory is empty: {}'.format(expected_dir))
else:
errs.append('Directory not found: {}'.format(expected_dir))
return errs
def check_root(fs_root: Path, runtime_dir: Path,
check_os_mounts=True) -> typing.List[str]:
"""Assert that the filesystem rooted at fs_root follows
the layout expected by run_in_chroot.sh.
Args:
fs_root: Directory to consider root of filesystem.
runtime_dir: Location where run_in_chroot.sh expects
its runtime dependencies.
check_os_mounts: Whether to check mounts such as dev, proc, sys.
Returns:
A list of errors found, or empty if successful.
"""
checks = [
lambda: is_file(
runtime_dir / 'cloud_product.txt',
substring='sle-module-public-cloud'),
lambda: is_file(
runtime_dir / 'post_convert_packages.txt'),
lambda: is_file(
runtime_dir / 'run_in_chroot.sh',
substring='#!/usr/bin/env bash'),
lambda: is_non_empty_dir(
runtime_dir / 'pre_convert_py'
),
lambda: is_non_empty_dir(
runtime_dir / 'pre_convert_rpm'
),
lambda: is_file(
fs_root / 'etc/hosts',
substring='metadata.google.internal'),
lambda: is_file(
fs_root / 'etc/resolv.conf',
substring='google.internal'),
]
if check_os_mounts:
checks += [
lambda: is_non_empty_dir(fs_root / 'dev'),
lambda: is_non_empty_dir(fs_root / 'proc'),
lambda: is_non_empty_dir(fs_root / 'sys'),
]
errs = []
for c in checks:
errs += c()
return errs
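if __name__ == '__main__':
    # Minimal manual smoke test; these paths are illustrative assumptions, not the
    # locations used by the actual import workflow.
    import sys
    problems = check_root(fs_root=Path('/mnt/guest'),
                          runtime_dir=Path('/mnt/guest/tmp/run_in_chroot'),
                          check_os_mounts=False)
    for problem in problems:
        logging.error(problem)
    sys.exit(1 if problems else 0)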
|
pygrim/formulas/gaussian_quadrature.py | mezzarobba/fungrim | 102 | 12735847 | # -*- coding: utf-8 -*-
from ..expr import *
def_Topic(
Title("Gaussian quadrature"),
Section("Gauss-Legendre quadrature"),
SeeTopics("Legendre polynomials"),
Entries(
"0745ee", # Legendre polynomial zeros
"ea4754", # weights
"47b181", # -1,1
"545987", # a,b
),
)
make_entry(ID("ea4754"),
Formula(Equal(GaussLegendreWeight(n, k), 2 / ((1 - LegendrePolynomialZero(n,k)**2) * ComplexDerivative(LegendrePolynomial(n, t), For(t, LegendrePolynomialZero(n, k), 1))**2))),
Variables(n, k),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(k, Range(1, n)))))
make_entry(ID("47b181"),
Formula(Where(LessEqual(Abs(Integral(f(t), For(t, -1, 1)) - Sum(GaussLegendreWeight(n,k) * f(LegendrePolynomialZero(n,k)), For(k, 1, n))),
64*M/(15*(1-rho**-2)*rho**(2*n))), Equal(M, Supremum(Abs(f(t)), ForElement(t, BernsteinEllipse(rho)))))),
Variables(f, n, rho),
Assumptions(And(Element(n, ZZGreaterEqual(1)), Element(rho, RR), Greater(rho, 1),
IsHolomorphic(f(z), ForElement(z, InteriorClosure(BernsteinEllipse(rho)))))),
References("<NAME>, Is Gauss Quadrature Better than Clenshaw-Curtis? SIAM Rev., 50(1), 67-87. DOI:10.1137/060659831"))
make_entry(ID("545987"),
Formula(Where(LessEqual(Abs(Integral(f(t), For(t, a, b)) - (b-a)/2 * Sum(GaussLegendreWeight(n,k) * f((b-a)/2 * LegendrePolynomialZero(n,k) + (a+b)/2), For(k, 1, n))),
(Abs(b-a)/2) * (64*M/(15*(1-rho**-2)*rho**(2*n)))), Equal(M, Supremum(Abs(f((b-a)/2 * t + (a+b)/2)), ForElement(t, BernsteinEllipse(rho)))))),
Variables(f, a, b, n, rho),
Assumptions(And(Element(a, CC), Element(b, CC), Element(n, ZZGreaterEqual(1)), Element(rho, RR), Greater(rho, 1),
IsHolomorphic(f(z), ForElement(z, Subset(InteriorClosure(BernsteinEllipse(rho))))))),
References("<NAME>, Is Gauss Quadrature Better than Clenshaw-Curtis? SIAM Rev., 50(1), 67-87. DOI:10.1137/060659831"))
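# Numerical sanity check of the weight formula in entry ea4754 (illustrative only; numpy
# is not otherwise used by this module):
#   import numpy as np
#   x, w = np.polynomial.legendre.leggauss(5)
#   P5 = np.polynomial.legendre.Legendre.basis(5)
#   assert np.allclose(w, 2 / ((1 - x**2) * P5.deriv()(x)**2))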
|
notebook/pandas_index_columns_select.py | vhn0912/python-snippets | 174 | 12735857 | import pandas as pd
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
print(df.index)
# Index(['Alice', 'Bob', 'Charlie', 'Dave', 'Ellen', 'Frank'], dtype='object', name='name')
print(df.index.str.contains('li'))
# [ True False True False False False]
print(df[df.index.str.contains('li')])
# age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
print(df.index.str.endswith('e'))
# [ True False True True False False]
print(df[df.index.str.endswith('e')])
# age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
# Dave 68 TX 70
print(df.columns)
# Index(['age', 'state', 'point'], dtype='object')
print(df.columns.str.endswith('e'))
# [ True True False]
print(df.loc[:, df.columns.str.endswith('e')])
# age state
# name
# Alice 24 NY
# Bob 42 CA
# Charlie 18 CA
# Dave 68 TX
# Ellen 24 CA
# Frank 30 NY
print(df.iloc[:, df.columns.str.endswith('e')])
# age state
# name
# Alice 24 NY
# Bob 42 CA
# Charlie 18 CA
# Dave 68 TX
# Ellen 24 CA
# Frank 30 NY
print(df.loc[df.index.str.contains('li'), df.columns.str.endswith('e')])
# age state
# name
# Alice 24 NY
# Charlie 18 CA
|
accounts/social_connect.py | annevandalfsen/screenbird | 121 | 12735870 | <reponame>annevandalfsen/screenbird
import settings
import tweepy
import base64
import hashlib
import hmac
import logging
import simplejson as json
from facepy import SignedRequest, GraphAPI
from django.http import HttpResponse, HttpResponseRedirect
from social_auth.models import UserSocialAuth
log = logging.getLogger(__name__)  # module logger; parse_signed_request logs through this
def twitter_get_auth_url(request):
auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
auth_url = auth.get_authorization_url()
request.session['request_token'] = (auth.request_token.key, auth.request_token.secret)
return HttpResponseRedirect(auth_url)
def check_facebook_connection(user):
"""Checks facebook connection exists with the app if not,
then returns False
"""
try:
user_social = UserSocialAuth.objects.get(user=user, provider='facebook')
extra_data = eval(str(user_social.extra_data))
access_token = extra_data['access_token']
graph = GraphAPI(access_token)
try:
graph = graph.get('me/')
except GraphAPI.Error:
connected = False
else:
connected = True
except UserSocialAuth.DoesNotExist:
connected = False
return connected
def check_twitter_connection(user):
"""Checks twitter connection exists with the app if not,
then returns False
"""
try:
user_social = UserSocialAuth.objects.get(user=user, provider='twitter')
extra_data = eval(str(user_social.extra_data))
access_tokens = extra_data['access_token']
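        # extra_data stores the token as a querystring containing oauth_token and
        # oauth_token_secret; the splits below recover the secret and key halves.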
access_token_list = access_tokens.split('oauth_token_secret=')[1].split('&oauth_token=')
secret = access_token_list[0]
key = access_token_list[1]
auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
auth.set_access_token(key, secret)
api = tweepy.API(auth)
connected = api.verify_credentials()
except UserSocialAuth.DoesNotExist:
connected = False
    return bool(connected)
def base64_url_decode(inp):
padding_factor = (4 - len(inp) % 4) % 4
inp += "="*padding_factor
return base64.b64decode(unicode(inp).translate(dict(zip(map(ord, u'-_'), u'+/'))))
def parse_signed_request(signed_request, secret):
"""The signed_request parameter is a simple way to make sure that the data
you're receiving is the actual data sent by Facebook. It is signed using
your application secret which is only known by you and Facebook. If someone
were to make a change to the data, the signature would no longer validate as
they wouldn't know your application secret to also update the signature.
    This helper parses and verifies the "signed_request" value.
"""
l = signed_request.split('.', 2)
encoded_sig = l[0]
payload = l[1]
sig = base64_url_decode(encoded_sig)
data = json.loads(base64_url_decode(payload))
if data.get('algorithm').upper() != 'HMAC-SHA256':
log.error('Unknown algorithm')
return None
else:
expected_sig = hmac.new(secret, msg=payload, digestmod=hashlib.sha256).digest()
if sig != expected_sig:
return None
else:
log.debug('valid signed request received..')
return data
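# Shape of the input this function expects (illustrative values, not real credentials):
#   signed_request = "<b64url(signature)>.<b64url(json_payload)>"
# where the JSON payload contains at least {"algorithm": "HMAC-SHA256", ...} and the
# signature is the HMAC-SHA256 digest of the still-encoded payload segment, keyed with
# the app secret. parse_signed_request() returns the decoded payload dict when the
# signature matches, otherwise None.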
|
datasets/iuv_crop2full.py | google/retiming | 152 | 12735871 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert UV crops to full UV maps."""
import os
import sys
import json
from PIL import Image
import numpy as np
def place_crop(crop, image, center_x, center_y):
"""Place the crop in the image at the specified location."""
im_height, im_width = image.shape[:2]
crop_height, crop_width = crop.shape[:2]
left = center_x - crop_width // 2
right = left + crop_width
top = center_y - crop_height // 2
bottom = top + crop_height
adjusted_crop = crop # remove regions of crop that go beyond image bounds
if left < 0:
adjusted_crop = adjusted_crop[:, -left:]
if right > im_width:
adjusted_crop = adjusted_crop[:, :(im_width - right)]
if top < 0:
adjusted_crop = adjusted_crop[-top:]
if bottom > im_height:
adjusted_crop = adjusted_crop[:(im_height - bottom)]
crop_mask = (adjusted_crop > 0).astype(crop.dtype).sum(-1, keepdims=True)
image[max(0, top):min(im_height, bottom), max(0, left):min(im_width, right)] *= (1 - crop_mask)
image[max(0, top):min(im_height, bottom), max(0, left):min(im_width, right)] += adjusted_crop
return image
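# Rough usage sketch for place_crop (shapes here are illustrative assumptions):
#   canvas = np.zeros((480, 640, 4))
#   crop = np.ones((100, 100, 4))
#   canvas = place_crop(crop, canvas, center_x=320, center_y=240)
# The crop is pasted centred on (center_x, center_y); any part falling outside the
# canvas is trimmed, and non-zero crop pixels overwrite whatever was underneath.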
def crop2full(keypoints_path, metadata_path, uvdir, outdir):
"""Create each frame's layer UVs from predicted UV crops"""
with open(keypoints_path) as f:
kp_data = json.load(f)
# Get all people ids
people_ids = set()
for frame in kp_data:
for skeleton in kp_data[frame]:
people_ids.add(skeleton['idx'])
people_ids = sorted(list(people_ids))
with open(metadata_path) as f:
metadata = json.load(f)
orig_size = np.array(metadata['alphapose_input_size'][::-1])
out_size = np.array(metadata['size_LR'][::-1])
if 'people_layers' in metadata:
people_layers = metadata['people_layers']
else:
people_layers = [[pid] for pid in people_ids]
# Create output directories.
for layer_i in range(1, 1 + len(people_layers)):
os.makedirs(os.path.join(outdir, f'{layer_i:02d}'), exist_ok=True)
print(f'Writing UVs to {outdir}')
for frame in sorted(kp_data):
for layer_i, layer in enumerate(people_layers, 1):
out_path = os.path.join(outdir, f'{layer_i:02d}', frame)
sys.stdout.flush()
sys.stdout.write('processing frame %s\r' % out_path)
uv_map = np.zeros([out_size[0], out_size[1], 4])
for person_id in layer:
matches = [p for p in kp_data[frame] if p['idx'] == person_id]
if len(matches) == 0: # person doesn't appear in this frame
continue
skeleton = matches[0]
kps = np.array(skeleton['keypoints']).reshape(17, 3)
# Get kps bounding box.
left = kps[:, 0].min()
right = kps[:, 0].max()
top = kps[:, 1].min()
bottom = kps[:, 1].max()
height = bottom - top
width = right - left
orig_crop_size = max(height, width)
orig_center_x = (left + right) // 2
orig_center_y = (top + bottom) // 2
# read predicted uv map
uv_crop_path = os.path.join(uvdir, f'{person_id:02d}_{os.path.basename(out_path)[:-4]}_output_uv.png')
if os.path.exists(uv_crop_path):
uv_crop = np.array(Image.open(uv_crop_path))
else:
uv_crop = np.zeros([256, 256, 3])
# add person ID channel
person_mask = (uv_crop[..., 0:1] > 0).astype('uint8')
person_ids = (255 - person_id) * person_mask
uv_crop = np.concatenate([uv_crop, person_ids], -1)
# scale crop to desired output size
# 256 is the crop size, 192 is the inner crop size
out_crop_size = orig_crop_size * 256./192 * out_size / orig_size
out_crop_size = out_crop_size.astype(np.int)
uv_crop = uv_crop.astype(np.uint8)
uv_crop = np.array(Image.fromarray(uv_crop).resize((out_crop_size[1], out_crop_size[0]), resample=Image.NEAREST))
# scale center coordinate accordingly
out_center_x = (orig_center_x * out_size[1] / orig_size[1]).astype(np.int)
out_center_y = (orig_center_y * out_size[0] / orig_size[0]).astype(np.int)
# Place UV crop in full UV map and save.
uv_map = place_crop(uv_crop, uv_map, out_center_x, out_center_y)
uv_map = Image.fromarray(uv_map.astype('uint8'))
uv_map.save(out_path)
if __name__ == "__main__":
import argparse
arguments = argparse.ArgumentParser()
arguments.add_argument('--dataroot', type=str)
opt = arguments.parse_args()
keypoints_path = os.path.join(opt.dataroot, 'keypoints.json')
metadata_path = os.path.join(opt.dataroot, 'metadata.json')
uvdir = os.path.join(opt.dataroot, 'kp2uv/test_latest/images')
outdir = os.path.join(opt.dataroot, 'iuv')
crop2full(keypoints_path, metadata_path, uvdir, outdir)
|
main.py | SJHBXShub/Center_Loss | 813 | 12735874 | import os
import sys
import argparse
import datetime
import time
import os.path as osp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import datasets
import models
from utils import AverageMeter, Logger
from center_loss import CenterLoss
parser = argparse.ArgumentParser("Center Loss Example")
# dataset
parser.add_argument('-d', '--dataset', type=str, default='mnist', choices=['mnist'])
parser.add_argument('-j', '--workers', default=4, type=int,
help="number of data loading workers (default: 4)")
# optimization
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--lr-model', type=float, default=0.001, help="learning rate for model")
parser.add_argument('--lr-cent', type=float, default=0.5, help="learning rate for center loss")
parser.add_argument('--weight-cent', type=float, default=1, help="weight for center loss")
parser.add_argument('--max-epoch', type=int, default=100)
parser.add_argument('--stepsize', type=int, default=20)
parser.add_argument('--gamma', type=float, default=0.5, help="learning rate decay")
# model
parser.add_argument('--model', type=str, default='cnn')
# misc
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--save-dir', type=str, default='log')
parser.add_argument('--plot', action='store_true', help="whether to plot features for every epoch")
args = parser.parse_args()
def main():
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))
if use_gpu:
print("Currently using GPU: {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU")
print("Creating dataset: {}".format(args.dataset))
dataset = datasets.create(
name=args.dataset, batch_size=args.batch_size, use_gpu=use_gpu,
num_workers=args.workers,
)
trainloader, testloader = dataset.trainloader, dataset.testloader
print("Creating model: {}".format(args.model))
model = models.create(name=args.model, num_classes=dataset.num_classes)
if use_gpu:
model = nn.DataParallel(model).cuda()
criterion_xent = nn.CrossEntropyLoss()
criterion_cent = CenterLoss(num_classes=dataset.num_classes, feat_dim=2, use_gpu=use_gpu)
optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)
if args.stepsize > 0:
scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)
start_time = time.time()
for epoch in range(args.max_epoch):
print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
train(model, criterion_xent, criterion_cent,
optimizer_model, optimizer_centloss,
trainloader, use_gpu, dataset.num_classes, epoch)
if args.stepsize > 0: scheduler.step()
if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
print("==> Test")
acc, err = test(model, testloader, use_gpu, dataset.num_classes, epoch)
print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def train(model, criterion_xent, criterion_cent,
optimizer_model, optimizer_centloss,
trainloader, use_gpu, num_classes, epoch):
model.train()
xent_losses = AverageMeter()
cent_losses = AverageMeter()
losses = AverageMeter()
if args.plot:
all_features, all_labels = [], []
for batch_idx, (data, labels) in enumerate(trainloader):
if use_gpu:
data, labels = data.cuda(), labels.cuda()
features, outputs = model(data)
loss_xent = criterion_xent(outputs, labels)
loss_cent = criterion_cent(features, labels)
loss_cent *= args.weight_cent
loss = loss_xent + loss_cent
optimizer_model.zero_grad()
optimizer_centloss.zero_grad()
loss.backward()
optimizer_model.step()
# by doing so, weight_cent would not impact on the learning of centers
for param in criterion_cent.parameters():
param.grad.data *= (1. / args.weight_cent)
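        # (loss_cent was multiplied by weight_cent above, so its gradient w.r.t. the centers
        # carries that factor as well; dividing it out lets the centers be updated with the
        # plain center-loss gradient at lr_cent, independently of weight_cent.)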
optimizer_centloss.step()
losses.update(loss.item(), labels.size(0))
xent_losses.update(loss_xent.item(), labels.size(0))
cent_losses.update(loss_cent.item(), labels.size(0))
if args.plot:
if use_gpu:
all_features.append(features.data.cpu().numpy())
all_labels.append(labels.data.cpu().numpy())
else:
all_features.append(features.data.numpy())
all_labels.append(labels.data.numpy())
if (batch_idx+1) % args.print_freq == 0:
print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) CenterLoss {:.6f} ({:.6f})" \
.format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg, cent_losses.val, cent_losses.avg))
if args.plot:
all_features = np.concatenate(all_features, 0)
all_labels = np.concatenate(all_labels, 0)
plot_features(all_features, all_labels, num_classes, epoch, prefix='train')
def test(model, testloader, use_gpu, num_classes, epoch):
model.eval()
correct, total = 0, 0
if args.plot:
all_features, all_labels = [], []
with torch.no_grad():
for data, labels in testloader:
if use_gpu:
data, labels = data.cuda(), labels.cuda()
features, outputs = model(data)
predictions = outputs.data.max(1)[1]
total += labels.size(0)
correct += (predictions == labels.data).sum()
if args.plot:
if use_gpu:
all_features.append(features.data.cpu().numpy())
all_labels.append(labels.data.cpu().numpy())
else:
all_features.append(features.data.numpy())
all_labels.append(labels.data.numpy())
if args.plot:
all_features = np.concatenate(all_features, 0)
all_labels = np.concatenate(all_labels, 0)
plot_features(all_features, all_labels, num_classes, epoch, prefix='test')
acc = correct * 100. / total
err = 100. - acc
return acc, err
def plot_features(features, labels, num_classes, epoch, prefix):
"""Plot features on 2D plane.
Args:
features: (num_instances, num_features).
labels: (num_instances).
"""
colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
for label_idx in range(num_classes):
plt.scatter(
features[labels==label_idx, 0],
features[labels==label_idx, 1],
c=colors[label_idx],
s=1,
)
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc='upper right')
dirname = osp.join(args.save_dir, prefix)
if not osp.exists(dirname):
os.mkdir(dirname)
save_name = osp.join(dirname, 'epoch_' + str(epoch+1) + '.png')
plt.savefig(save_name, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
hardware/chip/rtl872xd/hal/hal_test/ucube.py | wstong999/AliOS-Things | 4,538 | 12735877 | src = Split('''
hal_test.c
''')
component = aos_component('hal_test', src)
component.add_cflags('-Wall')
component.add_cflags('-Werror')
|
08-def-type-hints/messages/hints_2/messages_test.py | SeirousLee/example-code-2e | 990 | 12735878 | from pytest import mark
from messages import show_count
@mark.parametrize('qty, expected', [
(1, '1 part'),
(2, '2 parts'),
(0, 'no parts'),
])
def test_show_count(qty: int, expected: str) -> None:
got = show_count(qty, 'part')
assert got == expected
# tag::TEST_IRREGULAR[]
@mark.parametrize('qty, expected', [
(1, '1 child'),
(2, '2 children'),
(0, 'no children'),
])
def test_irregular(qty: int, expected: str) -> None:
got = show_count(qty, 'child', 'children')
assert got == expected
# end::TEST_IRREGULAR[]
|
blog/migrations/0002_auto_20150226_2305.py | geminibleep/myblog | 274 | 12735883 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='blogcategory',
options={'ordering': ['name'], 'verbose_name_plural': 'Blog Categories'},
),
migrations.AddField(
model_name='blogpage',
name='date',
field=models.DateField(verbose_name='Post date', default=datetime.datetime(2015, 2, 26, 23, 5, 30, 771014)),
preserve_default=False,
),
]
|
spacy/tests/lang/el/test_noun_chunks.py | snosrap/spaCy | 22,040 | 12735885 | <gh_stars>1000+
import pytest
def test_noun_chunks_is_parsed_el(el_tokenizer):
"""Test that noun_chunks raises Value Error for 'el' language if Doc is not parsed."""
doc = el_tokenizer("είναι χώρα της νοτιοανατολικής")
with pytest.raises(ValueError):
list(doc.noun_chunks)
|
tests/unit/core/providers/aws/s3/_helpers/test_parameters.py | avosper-intellaegis/runway | 134 | 12735889 | <filename>tests/unit/core/providers/aws/s3/_helpers/test_parameters.py
"""Test runway.core.providers.aws.s3._helpers.parameters."""
# pylint: disable=no-self-use
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List
import pytest
from pydantic import ValidationError
from runway.core.providers.aws.s3._helpers.parameters import (
Parameters,
ParametersDataModel,
)
if TYPE_CHECKING:
from pathlib import Path
from pytest_mock import MockerFixture
from runway.core.providers.aws.s3._helpers.parameters import PathsType
class TestParameters:
"""Test Parameters."""
data_locallocal: ParametersDataModel
data_s3s3: ParametersDataModel
data_s3local: ParametersDataModel
def setup_method(self) -> None:
"""Run before each test method if run to return the class instance attrs to default."""
self.data_locallocal = ParametersDataModel(dest="test-dest", src="test-src")
self.data_s3s3 = ParametersDataModel(dest="s3://test-dest", src="s3://test-src")
self.data_s3local = ParametersDataModel(dest="test-dest", src="s3://test-src")
def test_init(self, mocker: MockerFixture) -> None:
"""Test __init__."""
mock_validate_path_args = mocker.patch.object(Parameters, "_validate_path_args")
obj = Parameters("test", self.data_locallocal)
assert obj.action == "test"
assert obj.data == self.data_locallocal
mock_validate_path_args.assert_called_once_with()
@pytest.mark.parametrize(
"cmd, expected",
[("sync", True), ("mb", True), ("rb", True), ("cp", False), ("mv", False)],
)
def test_init_set_dir_op(
self, cmd: str, expected: bool, mocker: MockerFixture
) -> None:
"""Test __init__."""
mocker.patch.object(Parameters, "_validate_path_args")
assert Parameters(cmd, self.data_locallocal).data.dir_op == expected
@pytest.mark.parametrize(
"cmd, expected",
[("sync", False), ("mb", False), ("rb", False), ("cp", False), ("mv", True)],
)
def test_init_set_is_move(
self, cmd: str, expected: bool, mocker: MockerFixture
) -> None:
"""Test __init__."""
mocker.patch.object(Parameters, "_validate_path_args")
assert Parameters(cmd, self.data_locallocal).data.is_move == expected
def test_same_path_mv_locallocal(self) -> None:
"""Test _same_path."""
self.data_locallocal.dest = self.data_locallocal.src
assert Parameters("mv", self.data_locallocal)
def test_same_path_mv_s3s3(self) -> None:
"""Test _same_path."""
self.data_s3s3.dest = self.data_s3s3.src
with pytest.raises(ValueError) as excinfo:
Parameters("mv", self.data_s3s3)
assert "Cannot mv a file onto itself" in str(excinfo.value)
def test_same_path_mv_s3s3_not_same(self) -> None:
"""Test _same_path."""
assert Parameters("mv", self.data_s3s3)
def test_same_path_sync_locallocal(self) -> None:
"""Test _same_path."""
self.data_locallocal.dest = self.data_locallocal.src
assert Parameters("sync", self.data_locallocal)
def test_same_path_sync_s3s3(self) -> None:
"""Test _same_path."""
self.data_s3s3.dest = self.data_s3s3.src
assert Parameters("sync", self.data_s3s3)
def test_validate_path_args_mv_s3local(self, tmp_path: Path) -> None:
"""Test _validate_path_args."""
self.data_s3local.dest = str(tmp_path)
assert Parameters("mv", self.data_s3local)
def test_validate_path_args_mv_s3local_not_exist(self, tmp_path: Path) -> None:
"""Test _validate_path_args."""
missing_dir = tmp_path / "missing"
self.data_s3local.dest = str(missing_dir)
assert Parameters("mv", self.data_s3local)
assert not missing_dir.exists()
def test_validate_path_args_sync_s3local(self, tmp_path: Path) -> None:
"""Test _validate_path_args."""
self.data_s3local.dest = str(tmp_path)
assert Parameters("sync", self.data_s3local)
def test_validate_path_args_sync_s3local_not_exist(self, tmp_path: Path) -> None:
"""Test _validate_path_args."""
missing_dir = tmp_path / "missing"
self.data_s3local.dest = str(missing_dir)
assert Parameters("sync", self.data_s3local)
assert missing_dir.exists()
class TestParametersDataModel:
"""Test ParametersDataModel."""
@pytest.mark.parametrize(
"dest, src, expected",
[
("test-dest", "test-src", "locallocal"),
("test-dest", "s3://test-src", "s3local"),
("s3://test-dest", "test-src", "locals3"),
("s3://test-dest", "s3://test-src", "s3s3"),
],
)
def test_determine_paths_type(
self, dest: str, expected: PathsType, src: str
) -> None:
"""Test _determine_paths_type."""
assert ParametersDataModel(dest=dest, src=src).paths_type == expected
def test_field_defaults(self) -> None:
"""Test field defaults."""
kwargs = {"dest": "test-dest", "src": "test-src"}
obj = ParametersDataModel(**kwargs)
assert obj.dest == kwargs["dest"]
assert obj.src == kwargs["src"]
assert not obj.delete
assert not obj.dir_op
assert not obj.exact_timestamps
assert not obj.follow_symlinks
assert not obj.is_move
assert not obj.only_show_errors
assert not obj.page_size
assert obj.paths_type == "locallocal"
assert not obj.size_only
@pytest.mark.parametrize(
"provided, expected",
[
("s3://test-bucket", "s3://test-bucket/"),
("s3://test-bucket/", "s3://test-bucket/"),
("s3://test-bucket/key.txt", "s3://test-bucket/key.txt"),
("./local", "./local"),
("./local/", "./local/"),
("./local/test.txt", "./local/test.txt"),
],
)
def test_normalize_s3_trailing_slash(self, provided: str, expected: str) -> None:
"""Test _normalize_s3_trailing_slash."""
assert ParametersDataModel(dest=provided, src="test").dest == expected
assert ParametersDataModel(dest="test", src=provided).src == expected
@pytest.mark.parametrize(
"kwargs, error_locs",
[({"dest": "test-dest"}, ["src"]), ({"src": "test-src"}, ["dest"])],
)
def test_required_fields(
self, error_locs: List[str], kwargs: Dict[str, Any]
) -> None:
"""Test required fields."""
with pytest.raises(ValidationError) as excinfo:
ParametersDataModel(**kwargs)
errors = excinfo.value.errors()
for index, loc in enumerate(error_locs):
assert errors[index]["loc"] == (loc,)
|
inselect/gui/views/boxes/__init__.py | NaturalHistoryMuseum/inselect | 128 | 12735896 | <reponame>NaturalHistoryMuseum/inselect<filename>inselect/gui/views/boxes/__init__.py
from .boxes_view import BoxesView # noqa
from .graphics_item_view import GraphicsItemView # noqa
|
corehq/apps/api/migrations/0002_alter_permissions.py | dimagilg/commcare-hq | 471 | 12735932 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-10 20:26
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sqlapiuser',
name='permissions',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=126, null=True), default=list, null=True, size=None),
),
]
|
docs_src/advanced_usage/adv_usage_003.py | cdpath/fastapi_login | 318 | 12735935 | <filename>docs_src/advanced_usage/adv_usage_003.py
@app.post('/login')
def login(response: Response):
...
token = manager.create_access_token(
data=dict(sub=user.email)
)
manager.set_cookie(response, token)
return response |
src/tequila/quantumchemistry/encodings.py | dwierichs/tequila | 214 | 12735950 | """
Collections of Fermion-to-Qubit encodings known to tequila
Most are Interfaces to OpenFermion
"""
from tequila.circuit.circuit import QCircuit
from tequila.circuit.gates import X
from tequila.hamiltonian.qubit_hamiltonian import QubitHamiltonian
import openfermion
def known_encodings():
# convenience for testing and I/O
encodings= {
"JordanWigner":JordanWigner,
"BravyiKitaev":BravyiKitaev,
"BravyiKitaevFast": BravyiKitaevFast,
"BravyiKitaevTree": BravyiKitaevTree,
"TaperedBravyiKitaev": TaperedBravyKitaev
}
# aliases
encodings = {**encodings,
"ReorderedJordanWigner": lambda **kwargs: JordanWigner(up_then_down=True, **kwargs),
"ReorderedBravyiKitaev": lambda **kwargs: BravyiKitaev(up_then_down=True, **kwargs),
"ReorderedBravyiKitaevTree": lambda **kwargs: BravyiKitaevTree(up_then_down=True, **kwargs),
}
return {k.replace("_","").replace("-","").upper():v for k,v in encodings.items()}
class EncodingBase:
@property
def name(self):
prefix=""
if self.up_then_down:
prefix="Reordered"
if hasattr(self, "_name"):
return prefix+self._name
else:
return prefix+type(self).__name__
def __init__(self, n_electrons, n_orbitals, up_then_down=False, *args, **kwargs):
self.n_electrons = n_electrons
self.n_orbitals = n_orbitals
self.up_then_down = up_then_down
def __call__(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> QubitHamiltonian:
"""
:param fermion_operator:
an openfermion FermionOperator
:return:
            The corresponding tequila QubitHamiltonian in this class's encoding
"""
if self.up_then_down:
op = openfermion.reorder(operator=fermion_operator, order_function=openfermion.up_then_down, num_modes=2*self.n_orbitals)
else:
op = fermion_operator
fop = self.do_transform(fermion_operator=op, *args, **kwargs)
fop.compress()
return self.post_processing(QubitHamiltonian.from_openfermion(fop))
def post_processing(self, op, *args, **kwargs):
return op
def up(self, i):
if self.up_then_down:
return i
else:
return 2*i
def down(self, i):
if self.up_then_down:
return i+self.n_orbitals
else:
return 2*i+1
def do_transform(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> openfermion.QubitOperator:
raise Exception("{}::do_transform: called base class".format(type(self).__name__))
def map_state(self, state:list, *args, **kwargs) -> list:
"""
Expects a state in spin-orbital ordering
Returns the corresponding qubit state in the class encoding
:param state:
basis-state as occupation number vector in spin orbitals
sorted as: [0_up, 0_down, 1_up, 1_down, ... N_up, N_down]
with N being the number of spatial orbitals
:return:
basis-state as qubit state in the corresponding mapping
"""
"""Does a really lazy workaround ... but it works
:return: Hartree-Fock Reference as binary-number
Parameters
----------
reference_orbitals: list:
give list of doubly occupied orbitals
default is None which leads to automatic list of the
first n_electron/2 orbitals
Returns
-------
"""
# default is a lazy workaround, but it workds
n_qubits = 2 * self.n_orbitals
spin_orbitals = sorted([i for i,x in enumerate(state) if int(x)==1])
string = "1.0 ["
for i in spin_orbitals:
string += str(i) + "^ "
string += "]"
fop = openfermion.FermionOperator(string, 1.0)
op = self(fop)
from tequila.wavefunction.qubit_wavefunction import QubitWaveFunction
wfn = QubitWaveFunction.from_int(0, n_qubits=n_qubits)
wfn = wfn.apply_qubitoperator(operator=op)
assert (len(wfn.keys()) == 1)
key = list(wfn.keys())[0].array
return key
def hcb_to_me(self, *args, **kwargs):
return None
def __str__(self):
return type(self).__name__
class JordanWigner(EncodingBase):
"""
OpenFermion::jordan_wigner
"""
def do_transform(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> openfermion.QubitOperator:
return openfermion.jordan_wigner(fermion_operator, *args, **kwargs)
def map_state(self, state:list, *args, **kwargs):
state = state + [0]*(self.n_orbitals-len(state))
result = [0]*len(state)
if self.up_then_down:
return [state[2*i] for i in range(self.n_orbitals)] + [state[2*i+1] for i in range(self.n_orbitals)]
else:
return state
def hcb_to_me(self, *args, **kwargs):
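# Expand a hard-core-boson (paired) state into this encoding: a controlled-X
# copies each occupied "up" qubit onto its corresponding "down" qubit.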
U = QCircuit()
for i in range(self.n_orbitals):
U += X(target=self.down(i), control=self.up(i))
return U
class BravyiKitaev(EncodingBase):
"""
Uses OpenFermion::bravyi_kitaev
"""
def do_transform(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> openfermion.QubitOperator:
return openfermion.bravyi_kitaev(fermion_operator, n_qubits=self.n_orbitals*2)
class BravyiKitaevTree(EncodingBase):
"""
Uses OpenFermion::bravyi_kitaev_tree
"""
def do_transform(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> openfermion.QubitOperator:
return openfermion.bravyi_kitaev_tree(fermion_operator, n_qubits=self.n_orbitals*2)
class BravyiKitaevFast(EncodingBase):
"""
Uses OpenFermion::bravyi_kitaev_fast (via the interaction-operator representation)
"""
def do_transform(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> openfermion.QubitOperator:
n_qubits = openfermion.count_qubits(fermion_operator)
if n_qubits != self.n_orbitals*2:
raise Exception("BravyiKitaevFast transformation currently only possible for full Hamiltonians (no UCC generators).\nfermion_operator was {}".format(fermion_operator))
op = openfermion.get_interaction_operator(fermion_operator)
return openfermion.bravyi_kitaev_fast(op)
class TaperedBravyKitaev(EncodingBase):
"""
Uses OpenFermion::symmetry_conserving_bravyi_kitaev (tapered bravyi_kitaev_tree arxiv:1701.07072)
Reduces Hamiltonian by 2 qubits
See OpenFermion Documentation for more
Does not work for UCC generators yet
"""
def __init__(self, n_electrons, n_orbitals, active_fermions=None, active_orbitals=None, *args, **kwargs):
if active_fermions is None:
self.active_fermions = n_electrons
else:
self.active_fermions = active_fermions
if active_orbitals is None:
self.active_orbitals = n_orbitals*2 # in openfermion those are spin-orbitals
else:
self.active_orbitals = active_orbitals
if "up_then_down" in kwargs:
raise Exception("Don't pass up_then_down argument to {}, it can't be changed".format(type(self).__name__))
super().__init__(n_orbitals=n_orbitals, n_electrons=n_electrons, up_then_down=False, *args, **kwargs)
def do_transform(self, fermion_operator:openfermion.FermionOperator, *args, **kwargs) -> openfermion.QubitOperator:
if openfermion.count_qubits(fermion_operator) != self.n_orbitals*2:
raise Exception("TaperedBravyiKitaev not ready for UCC generators yet")
return openfermion.symmetry_conserving_bravyi_kitaev(fermion_operator, active_orbitals=self.active_orbitals, active_fermions=self.active_fermions)
def map_state(self, state:list, *args, **kwargs):
non_tapered_trafo = BravyiKitaevTree(up_then_down=True, n_electrons=self.n_electrons, n_orbitals=self.n_orbitals)
key = non_tapered_trafo.map_state(state=state, *args, **kwargs)
n_qubits = self.n_orbitals*2
active_qubits = [i for i in range(n_qubits) if i not in [n_qubits - 1, n_qubits // 2 - 1]]
key = [key[i] for i in active_qubits]
return key
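# Minimal usage sketch (illustrative only), assuming `fop` is an
# openfermion.FermionOperator for a system with 2 electrons in 2 spatial orbitals:
#
# encoding = JordanWigner(n_electrons=2, n_orbitals=2)
# H = encoding(fop) # QubitHamiltonian in this encoding
# hf = encoding.map_state([1, 1, 0, 0]) # Hartree-Fock occupation in spin-orbitals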
|
tests/test_webdataset.py | neuroailab/ffcv | 1,969 | 12735954 | <gh_stars>1000+
from os import path
from glob import glob
import tempfile
import numpy as np
from tempfile import TemporaryDirectory, NamedTemporaryFile
import torch as ch
from torch.utils.data import Dataset
import webdataset as wds
from ffcv import DatasetWriter
from ffcv.reader import Reader
from ffcv.fields import IntField, FloatField
from test_writer import validate_simple_dataset
field_names = [
'index',
'value.pyd'
]
class DummyDataset(Dataset):
def __init__(self, l):
self.l = l
def __len__(self):
return self.l
def __getitem__(self, index):
if index >= self.l:
raise IndexError()
return (index, np.sin(index))
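# Write `dataset` into WebDataset-style .tar shards (at most 20 samples per shard);
# each sample is stored under a unique __key__ with one entry per field name.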
def write_webdataset(folder, dataset, field_names):
pattern = path.join(folder, "dataset-%06d.tar")
writer = wds.ShardWriter(pattern, maxcount=20)
with writer as sink:
for i, sample in enumerate(dataset):
data = {
'__key__': f'sample_{i}'
}
for field_name, value in zip(field_names, sample):
data[field_name] = value
sink.write(data)
def pipeline(dataset):
return (dataset
.decode()
.to_tuple(*field_names)
)
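# The pipeline decodes each WebDataset sample and yields (index, value) tuples in
# field_names order, which the DatasetWriter below maps onto its IntField/FloatField columns.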
if __name__ == '__main__':
N = 1007
dataset = DummyDataset(N)
with TemporaryDirectory() as temp_directory:
with NamedTemporaryFile() as handle:
fname = handle.name
write_webdataset(temp_directory, dataset, field_names)
files = glob(path.join(temp_directory, '*'))
files = list(sorted(files))
print(fname)
writer = DatasetWriter(fname, {
'index': IntField(),
'value': FloatField()
})
writer.from_webdataset(files, pipeline)
validate_simple_dataset(fname, N, shuffled=False) |
self_learn.py | cclauss/nonauto-nmt | 262 | 12735992 | <gh_stars>100-1000
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import torch
import numpy as np
from torchtext import data
from torchtext import datasets
from torch.nn import functional as F
from torch.autograd import Variable
import revtok
import logging
import random
import string
import traceback
import math
import uuid
import argparse
import os
import copy
import time
from tqdm import tqdm, trange
from model import Transformer, FastTransformer, INF, TINY, softmax
from utils import NormalField, NormalTranslationDataset, TripleTranslationDataset, ParallelDataset
from utils import Metrics, Best, computeGLEU, computeBLEU, Cache, Batch, masked_sort, unsorted, computeGroupBLEU
from time import gmtime, strftime
import sys
from traceback import extract_tb
from code import interact
def interactive_exception(e_class, e_value, tb):
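# Post-mortem helper: print the normal traceback, then open an interactive
# interpreter in each frame's local scope, starting from the innermost (failing)
# frame and working outward.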
sys.__excepthook__(e_class, e_value, tb)
tb_stack = extract_tb(tb)
locals_stack = []
while tb is not None:
locals_stack.append(tb.tb_frame.f_locals)
tb = tb.tb_next
while len(tb_stack) > 0:
frame = tb_stack.pop()
ls = locals_stack.pop()
print('\nInterpreter at file "{}", line {}, in {}:'.format(
frame.filename, frame.lineno, frame.name))
print(' {}'.format(frame.line.strip()))
interact(local=ls)
#sys.excepthook = interactive_exception
# check dirs
for d in ['models', 'runs', 'logs']:
if not os.path.exists('./{}'.format(d)):
os.mkdir('./{}'.format(d))
# params
parser = argparse.ArgumentParser(description='Train a Transformer model.')
# data
parser.add_argument('--data_prefix', type=str, default='../data/')
parser.add_argument('--dataset', type=str, default='iwslt', help='"flickr" or "iwslt"')
parser.add_argument('--language', type=str, default='ende', help='a combination of two language markers to show the language pair.')
parser.add_argument('--load_vocab', action='store_true', help='load a pre-computed vocabulary')
parser.add_argument('--load_dataset', action='store_true', help='load a pre-processed dataset')
parser.add_argument('--use_revtok', action='store_true', help='use reversible tokenization')
parser.add_argument('--level', type=str, default='subword', help='for BPE, we must preprocess the dataset')
parser.add_argument('--good_course', action='store_true', help='use beam-search output for distillation')
parser.add_argument('--test_set', type=str, default=None, help='which test set to use')
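# args.dist_set is read below when selecting iwslt distillation files, so declare it here
parser.add_argument('--dist_set', type=str, default=None, help='suffix of sequence-level distillation decode files (e.g. ".dec.b1")')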
parser.add_argument('--max_len', type=int, default=None, help='limit the train set sentences to this many tokens')
parser.add_argument('--remove_eos', action='store_true', help='possibly remove <eos> tokens for FastTransformer')
# model basic
parser.add_argument('--prefix', type=str, default='', help='prefix to denote the model, nothing or [time]')
parser.add_argument('--params', type=str, default='james-iwslt', help='parameter sets: james-iwslt, t2t-base, etc')
parser.add_argument('--fast', dest='model', action='store_const', const=FastTransformer,
default=Transformer, help='use a single self-attn stack')
# model variants
parser.add_argument('--local', dest='windows', action='store_const', const=[1, 3, 5, 7, -1],
default=None, help='use local attention')
parser.add_argument('--causal', action='store_true', help='use causal attention')
parser.add_argument('--positional_attention', action='store_true', help='incorporate positional information in key/value')
parser.add_argument('--no_source', action='store_true')
parser.add_argument('--use_mask', action='store_true', help='use src/trg mask during attention')
parser.add_argument('--diag', action='store_true', help='ignore diagonal attention when doing self-attention.')
parser.add_argument('--convblock', action='store_true', help='use ConvBlock instead of ResNet')
parser.add_argument('--cosine_output', action='store_true', help='use cosine similarity as output layer')
parser.add_argument('--noisy', action='store_true', help='inject noise in the attention mechanism: Beta-Gumbel softmax')
parser.add_argument('--noise_samples', type=int, default=0, help='only useful for noisy parallel decoding')
parser.add_argument('--critic', action='store_true', help='use critic')
parser.add_argument('--kernel_sizes', type=str, default='2,3,4,5', help='kernel sizes of convnet critic')
parser.add_argument('--kernel_num', type=int, default=128, help='number of each kind of kernel')
parser.add_argument('--use_wo', action='store_true', help='use output weight matrix in multihead attention')
parser.add_argument('--share_embeddings', action='store_true', help='share embeddings between encoder and decoder')
parser.add_argument('--use_alignment', action='store_true', help='use the aligned fake data to initialize')
parser.add_argument('--hard_inputs', action='store_true', help='use hard selection as inputs, instead of soft-attention over embeddings.')
parser.add_argument('--preordering', action='store_true', help='use the ground-truth reordering information')
parser.add_argument('--use_posterior_order', action='store_true', help='directly use the groud-truth alignment for reordering.')
parser.add_argument('--train_decoder_with_order', action='store_true', help='when training the decoder, use the ground-truth')
parser.add_argument('--postordering', action='store_true', help='just have a try...')
parser.add_argument('--fertility_only', action='store_true')
parser.add_argument('--highway', action='store_true', help='usually false')
parser.add_argument('--mix_of_experts', action='store_true')
parser.add_argument('--orderless', action='store_true', help='for the inputs, remove the order information')
parser.add_argument('--cheating', action='store_true', help='disable decoding, always use real fertility')
# running
parser.add_argument('--mode', type=str, default='train', help='train, test or build')
parser.add_argument('--gpu', type=int, default=0, help='GPU to use or -1 for CPU')
parser.add_argument('--seed', type=int, default=19920206, help='seed for randomness')
parser.add_argument('--eval-every', type=int, default=1000, help='run dev every')
parser.add_argument('--maximum_steps', type=int, default=1000000, help='maximum steps you take to train a model')
parser.add_argument('--disable_lr_schedule', action='store_true', help='disable the transformer learning-rate schedule')
parser.add_argument('--batchsize', type=int, default=2048, help='# of tokens processed per batch')
parser.add_argument('--hidden_size', type=int, default=None, help='input the hidden size')
parser.add_argument('--length_ratio', type=int, default=2, help='maximum lengths of decoding')
parser.add_argument('--optimizer', type=str, default='Adam')
parser.add_argument('--beam_size', type=int, default=1, help='beam-size used in Beamsearch, default using greedy decoding')
parser.add_argument('--alpha', type=float, default=0.6, help='length normalization weights')
parser.add_argument('--temperature', type=float, default=1, help='smoothing temperature for noisy decoding')
parser.add_argument('--multi_run', type=int, default=1, help='we can run the code multiple times to get the best')
parser.add_argument('--load_from', type=str, default=None, help='load from checkpoint')
parser.add_argument('--resume', action='store_true', help='when loading from the saved model, it resumes from that.')
parser.add_argument('--teacher', type=str, default=None, help='load a pre-trained auto-regressive model.')
parser.add_argument('--share_encoder', action='store_true', help='use teacher-encoder to initialize student')
parser.add_argument('--finetune_encoder', action='store_true', help='if further train the encoder')
parser.add_argument('--seq_dist', action='store_true', help='knowledge distillation at sequence level')
parser.add_argument('--word_dist', action='store_true', help='knowledge distillation at word level')
parser.add_argument('--greedy_fertility', action='store_true', help='using the fertility generated by autoregressive model (only for seq_dist)')
parser.add_argument('--fertility_mode', type=str, default='argmax', help='mean, argmax or reinforce')
parser.add_argument('--finetuning_truth', action='store_true', help='use ground-truth for finetuning')
parser.add_argument('--trainable_teacher', action='store_true', help='have a trainable teacher')
parser.add_argument('--only_update_errors', action='store_true', help='have a trainable teacher')
parser.add_argument('--teacher_use_real', action='store_true', help='teacher also trained with MLE on real data')
parser.add_argument('--max_cache', type=int, default=0, help='save most recent max_cache decoded translations')
parser.add_argument('--replay_every', type=int, default=1000, help='every 1k updates, train the teacher again')
parser.add_argument('--replay_times', type=int, default=250, help='train the teacher again for 250k steps')
parser.add_argument('--margin', type=float, default=1.5, help='margin to make sure teacher will give higher score to real data')
parser.add_argument('--real_data', action='store_true', help='only used in the reverse kl setting')
parser.add_argument('--beta1', type=float, default=0.5, help='balancing MLE and KL loss.')
parser.add_argument('--beta2', type=float, default=0.01, help='balancing the GAN loss.')
parser.add_argument('--critic_only', type=int, default=0, help='pre-training the critic model.')
parser.add_argument('--st', action='store_true', help='straight through estimator')
parser.add_argument('--entropy', action='store_true')
parser.add_argument('--no_bpe', action='store_true', help='output files without BPE')
parser.add_argument('--no_write', action='store_true', help='do not write the decoding into the decoding files.')
parser.add_argument('--output_fer', action='store_true', help='decoding and output fertilities')
# debugging
parser.add_argument('--check', action='store_true', help='on training, only used to check on the test set.')
parser.add_argument('--debug', action='store_true', help='debug mode: no saving or tensorboard')
parser.add_argument('--tensorboard', action='store_true', help='use TensorBoard')
# old params
parser.add_argument('--old', action='store_true', help='this is used for solving conflicts of new codes')
parser.add_argument('--hyperopt', action='store_true', help='use HyperOpt')
parser.add_argument('--scst', action='store_true', help='use self-critical sequence training (SCST)')
parser.add_argument('--serve', type=int, default=None, help='serve at port')
parser.add_argument('--attention_discrimination', action='store_true')
# ---------------------------------------------------------------------------------------------------------------- #
args = parser.parse_args()
if args.prefix == '[time]':
args.prefix = strftime("%m.%d_%H.%M.", gmtime())
args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]
# get the language pair:
args.src = args.language[:2] # source language
args.trg = args.language[2:] # target language
# logger settings
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s: - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
fh = logging.FileHandler('./logs/log-{}.txt'.format(args.prefix))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
# setup random seeds
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# setup data-field
DataField = data.ReversibleField if args.use_revtok else NormalField
tokenizer = revtok.tokenize if args.use_revtok else lambda x: x.replace('@@ ', '').split()
TRG = DataField(init_token='<init>', eos_token='<eos>', batch_first=True)
SRC = DataField(batch_first=True) if not args.share_embeddings else TRG
ALIGN = data.Field(sequential=True, preprocessing=data.Pipeline(lambda tok: int(tok.split('-')[0])), use_vocab=False, pad_token=0, batch_first=True)
FER = data.Field(sequential=True, preprocessing=data.Pipeline(lambda tok: int(tok)), use_vocab=False, pad_token=0, batch_first=True)
align_dict, align_table = None, None
# setup the datasets (these need to be set up manually)
data_prefix = args.data_prefix
if args.dataset == 'iwslt':
if args.test_set is None:
args.test_set = 'IWSLT16.TED.tst2013'
if args.dist_set is None:
args.dist_set = '.dec.b1'
elif args.greedy_fertility:
logger.info('use the fertility predicted by autoregressive model (instead of fast-align)')
train_data, dev_data = ParallelDataset.splits(
path=data_prefix + 'iwslt/en-de/', train='train.en-de.bpe.new',
validation='IWSLT16.TED.tst2013.en-de.bpe.new.dev', exts=('.src.b1', '.trg.b1', '.dec.b1', '.fer', '.fer'),
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('fer', FER), ('fer_dec', FER)],
load_dataset=args.load_dataset, prefix='ts')
elif (args.mode == 'test') or (args.mode == 'test_noisy'):
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'iwslt/en-de/', train='train.tags.en-de{}'.format(
'.bpe' if not args.use_revtok else ''),
validation='{}.en-de{}'.format(
args.test_set, '.bpe' if not args.use_revtok else ''), exts=('.en', '.de'),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='normal')
else:
train_data, dev_data = ParallelDataset.splits(
path=data_prefix + 'iwslt/en-de/', train='train.tags.en-de.bpe',
validation='train.tags.en-de.bpe.dev', exts=('.en2', '.de2', '.decoded2', '.aligned', '.decode.aligned', '.fer', '.decode.fer'),
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('align', ALIGN), ('align_dec', ALIGN), ('fer', FER), ('fer_dec', FER)],
load_dataset=args.load_dataset, prefix='ts')
decoding_path = data_prefix + 'iwslt/en-de/{}.en-de.bpe.new'
if args.use_alignment and (args.model is FastTransformer):
align_dict = {l.split()[0]: l.split()[1] for l in open(data_prefix + 'iwslt/en-de/train.tags.en-de.dict')}
elif args.dataset == 'wmt16-ende':
if args.test_set is None:
args.test_set = 'newstest2013'
if (args.mode == 'test') or (args.mode == 'test_noisy'):
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-ende/', train='newstest2013.tok.bpe.32000',
validation='{}.tok.bpe.32000'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-ende/test.{}.{}'.format(args.prefix, args.test_set)
elif not args.seq_dist:
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-ende/', train='train.tok.clean.bpe.32000',
validation='{}.tok.bpe.32000'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-ende/{}.tok.bpe.decode'
else:
train_data, dev_data = ParallelDataset.splits(
path=data_prefix + 'wmt16-ende/', train='train.tok.bpe.decode',
validation='newstest2013.tok.bpe.decode.dev',
exts=('.src.b1', '.trg.b1', '.dec.b1', '.real.aligned', '.fake.aligned', '.real.fer', '.fake.fer'),
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('align', ALIGN), ('align_dec', ALIGN), ('fer', FER), ('fer_dec', FER)],
load_dataset=args.load_dataset, prefix='ts')
decoding_path = data_prefix + 'wmt16-ende/{}.tok.bpe.na'
if args.use_alignment and (args.model is FastTransformer):
align_table = {l.split()[0]: l.split()[1] for l in
open(data_prefix + 'wmt16-ende/train.tok.bpe.decode.full.fastlign2.dict')}
elif args.dataset == 'wmt16-deen':
if args.test_set is None:
args.test_set = 'newstest2013'
if (args.mode == 'test') or (args.mode == 'test_noisy'):
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-ende/', train='newstest2013.tok.bpe.32000',
validation='{}.tok.bpe.32000'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-ende/test.{}.{}'.format(args.prefix, args.test_set)
elif not args.seq_dist:
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-deen/', train='train.tok.clean.bpe.32000',
validation='{}.tok.bpe.32000'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-deen/{}.tok.bpe.decode'
else:
train_data, dev_data = ParallelDataset.splits(
path=data_prefix + 'wmt16-deen/', train='train.tok.bpe.decode',
validation='{}.tok.bpe.decode.dev'.format(args.test_set),
exts=('.src.b1', '.trg.b1', '.dec.b1', '.real.aligned', '.fake.aligned', '.real.fer', '.fake.fer'),
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('align', ALIGN), ('align_dec', ALIGN), ('fer', FER), ('fer_dec', FER)],
load_dataset=args.load_dataset, prefix='ts')
decoding_path = data_prefix + 'wmt16-deen/{}.tok.bpe.na'
if args.use_alignment and (args.model is FastTransformer):
align_table = {l.split()[0]: l.split()[1] for l in
open(data_prefix + 'wmt16-deen/train.tok.bpe.decode.full.fastlign2.dict')}
elif args.dataset == 'wmt16-enro':
if args.test_set is None:
args.test_set = 'dev'
if (args.mode == 'test') or (args.mode == 'test_noisy'):
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-enro/', train='dev.bpe',
validation='{}.bpe'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-enro/{}.bpe.decode'
elif not args.seq_dist:
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-enro/', train='corpus.bpe',
validation='{}.bpe'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-enro/{}.bpe.decode'
else:
train_data, dev_data = ParallelDataset.splits(
path=data_prefix + 'wmt16-enro/', train='train.bpe.decode',
validation='dev.bpe.decode.dev',
exts=('.src.b1', '.trg.b1', '.dec.b1', '.real.aligned', '.fake.aligned', '.real.fer', '.fake.fer'),
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('align', ALIGN), ('align_dec', ALIGN), ('fer', FER), ('fer_dec', FER)],
load_dataset=args.load_dataset, prefix='ts')
decoding_path = data_prefix + 'wmt16-enro/{}.tok.bpe.na'
if args.use_alignment and (args.model is FastTransformer):
align_table = {l.split()[0]: l.split()[1] for l in
open(data_prefix + 'wmt16-enro/train.bpe.decode.full.fastlign2.dict')}
elif args.dataset == 'wmt16-roen':
if args.test_set is None:
args.test_set = 'dev'
if (args.mode == 'test') or (args.mode == 'test_noisy'):
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-roen/', train='dev.bpe',
validation='{}.bpe'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-roen/{}.bpe.decode'
elif not args.seq_dist:
train_data, dev_data = NormalTranslationDataset.splits(
path=data_prefix + 'wmt16-roen/', train='corpus.bpe',
validation='{}.bpe'.format(args.test_set), exts=('.{}'.format(args.src), '.{}'.format(args.trg)),
fields=(SRC, TRG), load_dataset=args.load_dataset, prefix='real')
decoding_path = data_prefix + 'wmt16-roen/{}.bpe.decode'
else:
train_data, dev_data = ParallelDataset.splits(
path=data_prefix + 'wmt16-roen/', train='train.bpe.decode',
validation='dev.bpe.decode.dev',
exts=('.src.b1', '.trg.b1', '.dec.b1', '.real.aligned', '.fake.aligned', '.real.fer', '.fake.fer'),
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('align', ALIGN), ('align_dec', ALIGN), ('fer', FER), ('fer_dec', FER)],
load_dataset=args.load_dataset, prefix='ts')
decoding_path = data_prefix + 'wmt16-roen/{}.tok.bpe.na'
if args.use_alignment and (args.model is FastTransformer):
align_table = {l.split()[0]: l.split()[1] for l in
open(data_prefix + 'wmt16-roen/train.bpe.decode.full.fastlign2.dict')}
else:
raise NotImplementedError
# build word-level vocabularies
if args.load_vocab and os.path.exists(data_prefix + '{}/vocab{}_{}.pt'.format(
args.dataset, 'shared' if args.share_embeddings else '', '{}-{}'.format(args.src, args.trg))):
logger.info('load saved vocabulary.')
src_vocab, trg_vocab = torch.load(data_prefix + '{}/vocab{}_{}.pt'.format(
args.dataset, 'shared' if args.share_embeddings else '', '{}-{}'.format(args.src, args.trg)))
SRC.vocab = src_vocab
TRG.vocab = trg_vocab
else:
logger.info('save the vocabulary')
if not args.share_embeddings:
SRC.build_vocab(train_data, dev_data, max_size=50000)
TRG.build_vocab(train_data, dev_data, max_size=50000)
torch.save([SRC.vocab, TRG.vocab], data_prefix + '{}/vocab{}_{}.pt'.format(
args.dataset, 'shared' if args.share_embeddings else '', '{}-{}'.format(args.src, args.trg)))
args.__dict__.update({'trg_vocab': len(TRG.vocab), 'src_vocab': len(SRC.vocab)})
# build alignments ---
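# align_table maps every source vocabulary id to a target vocabulary id taken from
# the fast-align dictionary (defaulting to '<init>'); it is attached to the model
# later as model.alignment.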
if align_dict is not None:
align_table = [TRG.vocab.stoi['<init>'] for _ in range(len(SRC.vocab.itos))]
for src in align_dict:
align_table[SRC.vocab.stoi[src]] = TRG.vocab.stoi[align_dict[src]]
align_table[0] = 0 # --<unk>
align_table[1] = 1 # --<pad>
def dyn_batch_with_padding(new, i, sofar):
prev_max_len = sofar / (i - 1) if i > 1 else 0
if args.seq_dist:
return max(len(new.src), len(new.trg), len(new.dec), prev_max_len) * i
else:
return max(len(new.src), len(new.trg), prev_max_len) * i
def dyn_batch_without_padding(new, i, sofar):
if args.seq_dist:
return sofar + max(len(new.src), len(new.trg), len(new.dec))
else:
return sofar + max(len(new.src), len(new.trg))
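# Dynamic batching in tokens: with padding, the cost of a batch is (longest sentence
# so far) * (number of sentences); without padding, it is the running sum of each
# example's longest side.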
# build the dataset iterators
# work around torchtext making it hard to share vocabs without sharing other field properties
if args.share_embeddings:
SRC = copy.deepcopy(SRC)
SRC.init_token = None
SRC.eos_token = None
train_data.fields['src'] = SRC
dev_data.fields['src'] = SRC
if (args.model is FastTransformer) and (args.remove_eos):
TRG.eos_token = None
if args.max_len is not None:
train_data.examples = [ex for ex in train_data.examples if len(ex.trg) <= args.max_len]
if args.batchsize == 1: # speed-test: one sentence per batch.
batch_size_fn = lambda new, count, sofar: count
else:
batch_size_fn = dyn_batch_without_padding if args.model is Transformer else dyn_batch_with_padding
train_real, dev_real = data.BucketIterator.splits(
(train_data, dev_data), batch_sizes=(args.batchsize, args.batchsize), device=args.gpu,
batch_size_fn=batch_size_fn,
repeat=None if args.mode == 'train' else False)
logger.info("build the dataset. done!")
# model hyper-params:
hparams = None
if args.dataset == 'iwslt':
if args.params == 'james-iwslt':
hparams = {'d_model': 278, 'd_hidden': 507, 'n_layers': 5,
'n_heads': 2, 'drop_ratio': 0.079, 'warmup': 746} # ~32
elif args.params == 'james-iwslt2':
hparams = {'d_model': 278, 'd_hidden': 2048, 'n_layers': 5,
'n_heads': 2, 'drop_ratio': 0.079, 'warmup': 746} # ~32
teacher_hparams = {'d_model': 278, 'd_hidden': 507, 'n_layers': 5,
'n_heads': 2, 'drop_ratio': 0.079, 'warmup': 746}
elif args.dataset == 'wmt16-ende':
logger.info('use default parameters of t2t-base')
hparams = {'d_model': 512, 'd_hidden': 512, 'n_layers': 6,
'n_heads': 8, 'drop_ratio': 0.1, 'warmup': 16000} # ~32
teacher_hparams = hparams
elif args.dataset == 'wmt16-deen':
logger.info('use default parameters of t2t-base')
hparams = {'d_model': 512, 'd_hidden': 512, 'n_layers': 6,
'n_heads': 8, 'drop_ratio': 0.1, 'warmup': 16000} # ~32
teacher_hparams = hparams
elif args.dataset == 'wmt16-enro':
logger.info('use default parameters of t2t-base')
hparams = {'d_model': 512, 'd_hidden': 512, 'n_layers': 6,
'n_heads': 8, 'drop_ratio': 0.1, 'warmup': 16000} # ~32
teacher_hparams = hparams
elif args.dataset == 'wmt16-roen':
logger.info('use default parameters of t2t-base')
hparams = {'d_model': 512, 'd_hidden': 512, 'n_layers': 6,
'n_heads': 8, 'drop_ratio': 0.1, 'warmup': 16000} # ~32
teacher_hparams = hparams
if hparams is None:
logger.info('use default parameters of t2t-base')
hparams = {'d_model': 512, 'd_hidden': 512, 'n_layers': 6,
'n_heads': 8, 'drop_ratio': 0.1, 'warmup': 16000} # ~32
if args.teacher is not None:
teacher_args = copy.deepcopy(args)
teacher_args.__dict__.update(teacher_hparams)
args.__dict__.update(hparams)
if args.hidden_size is not None:
args.d_hidden = args.hidden_size
# show the arg:
logger.info(args)
hp_str = (f"{args.dataset}_{args.level}_{'fast_' if args.model is FastTransformer else ''}"
f"{args.d_model}_{args.d_hidden}_{args.n_layers}_{args.n_heads}_"
f"{args.drop_ratio:.3f}_{args.warmup}_"
f"{args.xe_until if hasattr(args, 'xe_until') else ''}_"
f"{f'{args.xe_ratio:.3f}' if hasattr(args, 'xe_ratio') else ''}_"
f"{args.xe_every if hasattr(args, 'xe_every') else ''}")
logger.info(f'Starting with HPARAMS: {hp_str}')
model_name = './models/' + args.prefix + hp_str
# build the model
model = args.model(SRC, TRG, args)
if args.load_from is not None:
with torch.cuda.device(args.gpu): # very important.
model.load_state_dict(torch.load('./models/' + args.load_from + '.pt',
map_location=lambda storage, loc: storage.cuda())) # load the pretrained models.
if args.critic:
model.install_critic()
# logger.info(str(model))
# if using a teacher
if args.teacher is not None:
teacher_model = Transformer(SRC, TRG, teacher_args)
with torch.cuda.device(args.gpu):
teacher_model.load_state_dict(torch.load('./models/' + args.teacher + '.pt',
map_location=lambda storage, loc: storage.cuda()))
for params in teacher_model.parameters():
if args.trainable_teacher:
params.requires_grad = True
else:
params.requires_grad = False
if (args.share_encoder) and (args.load_from is None):
model.encoder = copy.deepcopy(teacher_model.encoder)
for params in model.encoder.parameters():
if args.finetune_encoder:
params.requires_grad = True
else:
params.requires_grad = False
else:
teacher_model = None
# use cuda
if args.gpu > -1:
model.cuda(args.gpu)
if align_table is not None:
align_table = torch.LongTensor(align_table).cuda(args.gpu)
align_table = Variable(align_table)
model.alignment = align_table
if args.teacher is not None:
teacher_model.cuda(args.gpu)
def register_nan_checks(m):
def check_grad(module, grad_input, grad_output):
if any(np.any(np.isnan(gi.data.cpu().numpy())) for gi in grad_input if gi is not None):
raise RuntimeError('NaN gradient in ' + type(module).__name__)
m.apply(lambda module: module.register_backward_hook(check_grad))
def get_learning_rate(i, lr0=0.1):
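# Noam-style schedule: scale by 1/sqrt(d_model) and take the minimum of
# inverse-sqrt decay and linear warmup; a small constant rate is used when the
# schedule is disabled.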
if not args.disable_lr_schedule:
return lr0 * 10 / math.sqrt(args.d_model) * min(
1 / math.sqrt(i), i / (args.warmup * math.sqrt(args.warmup)))
return 0.00002
def export(x):
try:
with torch.cuda.device(args.gpu):
return x.data.cpu().float().mean()
except Exception:
return 0
def devol(batch):
new_batch = copy.copy(batch)
new_batch.src = Variable(batch.src.data, volatile=True)
return new_batch
# register_nan_checks(model)
# register_nan_checks(teacher_model)
def valid_model(model, dev, dev_metrics=None, distillation=False, print_out=False, teacher_model=None):
print_seqs = ['[sources]', '[targets]', '[decoded]', '[fertili]', '[origind]']
trg_outputs, dec_outputs = [], []
outputs = {}
model.eval()
if teacher_model is not None:
teacher_model.eval()
for j, dev_batch in enumerate(dev):
# decode from the model (whatever Transformer or FastTransformer)
torch.cuda.nvtx.range_push('quick_prepare')
inputs, input_masks, targets, target_masks, sources, source_masks, encoding, batch_size = model.quick_prepare(dev_batch, distillation)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push('prepare_initial')
decoder_inputs, input_reorder, reordering_cost = inputs, None, None
if type(model) is FastTransformer:
# batch_align = dev_batch.align_dec if distillation else dev_batch.align
batch_align = None
batch_fer = dev_batch.fer_dec if distillation else dev_batch.fer
# if args.postordering:
#
# targets_sorted = targets.gather(1, align_index)
# batch_align_sorted, align_index = masked_sort(batch_align, target_masks) # change the target indexxx, batch x max_trg
decoder_inputs, input_reorder, decoder_masks, reordering_cost = model.prepare_initial(encoding,
sources, source_masks, input_masks,
batch_align, batch_fer, decoding=(not args.cheating), mode='argmax')
else:
decoder_masks = input_masks
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push('model')
decoding, out, probs = model(encoding, source_masks, decoder_inputs, decoder_masks, decoding=True, return_probs=True)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push('batched_cost')
loss = 0
if args.postordering:
if args.cheating:
decoding1 = unsorted(decoding, align_index)
else:
positions = model.predict_offset(out, decoder_masks, None)
shifted_index = positions.sort(1)[1]
decoding1 = unsorted(decoding, shifted_index)
else:
decoding1 = decoding
# loss = model.batched_cost(targets, target_masks, probs)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push('output_decoding')
dev_outputs = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', decoding1), ('src', input_reorder)]]
if args.postordering:
dev_outputs += [model.output_decoding(('trg', decoding))]
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push('computeGLEU')
gleu = computeGLEU(dev_outputs[2], dev_outputs[1], corpus=False, tokenizer=tokenizer)
torch.cuda.nvtx.range_pop()
if print_out:
for k, d in enumerate(dev_outputs):
logger.info("{}: {}".format(print_seqs[k], d[0]))
logger.info('------------------------------------------------------------------')
if teacher_model is not None: # teacher is Transformer, student is FastTransformer
inputs_student, _, targets_student, _, _, _, encoding_teacher, _ = teacher_model.quick_prepare(dev_batch, False, decoding, decoding,
input_masks, target_masks, source_masks)
teacher_real_loss = teacher_model.cost(targets, target_masks,
out=teacher_model(encoding_teacher, source_masks, inputs, input_masks))
teacher_fake_out = teacher_model(encoding_teacher, source_masks, inputs_student, input_masks)
teacher_fake_loss = teacher_model.cost(targets_student, target_masks, out=teacher_fake_out)
teacher_alter_loss = teacher_model.cost(targets, target_masks, out=teacher_fake_out)
trg_outputs += dev_outputs[1]
dec_outputs += dev_outputs[2]
if dev_metrics is not None:
values = [loss, gleu]
if teacher_model is not None:
values += [teacher_real_loss, teacher_fake_loss,
teacher_real_loss - teacher_fake_loss,
teacher_alter_loss,
teacher_alter_loss - teacher_fake_loss]
if reordering_cost is not None:
values += [reordering_cost]
dev_metrics.accumulate(batch_size, *values)
corpus_gleu = computeGLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
corpus_bleu = computeBLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
outputs['corpus_gleu'] = corpus_gleu
outputs['corpus_bleu'] = corpus_bleu
if dev_metrics is not None:
logger.info(dev_metrics)
logger.info("The dev-set corpus GLEU = {}".format(corpus_gleu))
logger.info("The dev-set corpus BLEU = {}".format(corpus_bleu))
return outputs
def train_model(model, train, dev, teacher_model=None):
if args.tensorboard and (not args.debug):
from tensorboardX import SummaryWriter
writer = SummaryWriter('./runs/{}'.format(args.prefix+hp_str))
# optimizer
if args.optimizer == 'Adam':
opt = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], betas=(0.9, 0.98), eps=1e-9)
if args.trainable_teacher:
opt_teacher = torch.optim.Adam([p for p in teacher_model.parameters() if p.requires_grad], betas=(0.9, 0.98), eps=1e-9)
elif args.optimizer == 'RMSprop':
opt = torch.optim.RMSprop([p for p in model.parameters() if p.requires_grad], eps=1e-9)
if args.trainable_teacher:
opt_teacher = torch.optim.RMSprop([p for p in teacher_model.parameters() if p.requires_grad], eps=1e-9)
else:
raise NotImplementedError
# if resume training
if (args.load_from is not None) and (args.resume):
with torch.cuda.device(args.gpu): # very important.
offset, opt_states = torch.load('./models/' + args.load_from + '.pt.states',
map_location=lambda storage, loc: storage.cuda())
opt.load_state_dict(opt_states)
else:
offset = 0
# metrics
best = Best(max, 'corpus_bleu', 'corpus_gleu', 'gleu', 'loss', 'i', model=model, opt=opt, path=model_name, gpu=args.gpu)
train_metrics = Metrics('train', 'loss', 'real', 'fake')
dev_metrics = Metrics('dev', 'loss', 'gleu', 'real_loss', 'fake_loss', 'distance', 'alter_loss', 'distance2', 'reordering_loss', 'corpus_gleu')
progressbar = tqdm(total=args.eval_every, desc='start training.')
# cache
if args.max_cache > 0:
caches = Cache(args.max_cache, args.gpu)
for iters, batch in enumerate(train):
iters += offset
if iters > args.maximum_steps:
logger.info('reached the maximum number of update steps.')
break
if iters % args.eval_every == 0:
progressbar.close()
dev_metrics.reset()
if args.seq_dist:
outputs_course = valid_model(model, dev, dev_metrics,
distillation=True, teacher_model=None)#teacher_model=teacher_model)
if args.trainable_teacher:
outputs_teacher = valid_model(teacher_model, dev, None)
outputs_data = valid_model(model, dev, None if args.seq_dist else dev_metrics, teacher_model=None, print_out=True)
if args.tensorboard and (not args.debug):
writer.add_scalar('dev/GLEU_sentence_', dev_metrics.gleu, iters)
writer.add_scalar('dev/Loss', dev_metrics.loss, iters)
writer.add_scalar('dev/GLEU_corpus_', outputs_data['corpus_gleu'], iters)
writer.add_scalar('dev/BLEU_corpus_', outputs_data['corpus_bleu'], iters)
if args.seq_dist:
writer.add_scalar('dev/GLEU_corpus_dis', outputs_course['corpus_gleu'], iters)
writer.add_scalar('dev/BLEU_corpus_dis', outputs_course['corpus_bleu'], iters)
if args.trainable_teacher:
writer.add_scalar('dev/GLEU_corpus_teacher', outputs_teacher['corpus_gleu'], iters)
writer.add_scalar('dev/BLEU_corpus_teacher', outputs_teacher['corpus_bleu'], iters)
if args.teacher is not None:
writer.add_scalar('dev/Teacher_real_loss', dev_metrics.real_loss, iters)
writer.add_scalar('dev/Teacher_fake_loss', dev_metrics.fake_loss, iters)
writer.add_scalar('dev/Teacher_alter_loss', dev_metrics.alter_loss, iters)
writer.add_scalar('dev/Teacher_distance', dev_metrics.distance, iters)
writer.add_scalar('dev/Teacher_distance2', dev_metrics.distance2, iters)
if args.preordering:
writer.add_scalar('dev/Reordering_loss', dev_metrics.reordering_loss, iters)
if not args.debug:
best.accumulate(outputs_data['corpus_bleu'], outputs_data['corpus_gleu'], dev_metrics.gleu, dev_metrics.loss, iters)
logger.info('the best model is achieved at {}, average greedy GLEU={}, corpus GLEU={}, corpus BLEU={}'.format(
best.i, best.gleu, best.corpus_gleu, best.corpus_bleu))
logger.info('model:' + args.prefix + hp_str)
# ---set-up a new progressor---
progressbar = tqdm(total=args.eval_every, desc='start training.')
# --- training --- #
# try:
model.train()
opt.param_groups[0]['lr'] = get_learning_rate(iters + 1)
opt.zero_grad()
# prepare the data
inputs, input_masks, targets, target_masks, sources, source_masks, encoding, batch_size = model.quick_prepare(batch, args.seq_dist)
input_reorder, reordering_cost, decoder_inputs = None, None, inputs
batch_align = None # batch.align_dec if args.seq_dist else batch.align
batch_fer = batch.fer_dec if args.seq_dist else batch.fer
# batch_align_sorted, align_index = masked_sort(batch_align, target_masks) # change the target indexxx, batch x max_trg
# print(batch_fer.size(), input_masks.size(), source_masks.size(), sources.size())
# Prepare_Initial
if type(model) is FastTransformer:
inputs, input_reorder, input_masks, reordering_cost = model.prepare_initial(encoding, sources, source_masks, input_masks, batch_align, batch_fer)
# Maximum Likelihood Training
feedback = {}
if not args.word_dist:
loss = model.cost(targets, target_masks, out=model(encoding, source_masks, inputs, input_masks, positions= None, feedback=feedback))
# train the reordering also using MLE??
if args.preordering:
loss += reordering_cost
else:
# only used for FastTransformer: word-level adjustment
if not args.preordering:
decoding, out, probs = model(encoding, source_masks, inputs, input_masks, return_probs=True, decoding=True)
loss_student = model.batched_cost(targets, target_masks, probs) # student-loss (MLE)
decoder_masks = input_masks
else: # Note that MLE and decoding has different translations. We need to run the same code twice
if args.finetuning_truth:
decoding, out, probs = model(encoding, source_masks, inputs, input_masks, decoding=True, return_probs=True, feedback=feedback)
loss_student = model.cost(targets, target_masks, out=out)
decoder_masks = input_masks
else:
if args.fertility_mode != 'reinforce':
loss_student = model.cost(targets, target_masks, out=model(encoding, source_masks, inputs, input_masks, positions=None, feedback=feedback))
decoder_inputs, _, decoder_masks, _ = model.prepare_initial(encoding, sources, source_masks, input_masks,
batch_align, batch_fer, decoding=True, mode=args.fertility_mode)
decoding, out, probs = model(encoding, source_masks, decoder_inputs, decoder_masks, decoding=True, return_probs=True) # decode again
else:
# truth
decoding, out, probs = model(encoding, source_masks, inputs, input_masks, decoding=True, return_probs=True, feedback=feedback)
loss_student = model.cost(targets, target_masks, out=out)
decoder_masks = input_masks
# baseline
decoder_inputs_b, _, decoder_masks_b, _ = model.prepare_initial(encoding, sources, source_masks, input_masks,
batch_align, batch_fer, decoding=True, mode='mean')
decoding_b, out_b, probs_b = model(encoding, source_masks, decoder_inputs_b, decoder_masks_b, decoding=True, return_probs=True) # decode again
# reinforce
decoder_inputs_r, _, decoder_masks_r, _ = model.prepare_initial(encoding, sources, source_masks, input_masks,
batch_align, batch_fer, decoding=True, mode='reinforce')
decoding_r, out_r, probs_r = model(encoding, source_masks, decoder_inputs_r, decoder_masks_r, decoding=True, return_probs=True) # decode again
# train the reordering also using MLE??
if args.preordering:
loss_student += reordering_cost
# teacher tries translation + look-at student's output
teacher_model.eval()
if args.fertility_mode != 'reinforce':
inputs_student_index, _, targets_student_soft, _, _, _, encoding_teacher, _ = model.quick_prepare(batch, False, decoding, probs, decoder_masks, decoder_masks, source_masks)
out_teacher, probs_teacher = teacher_model(encoding_teacher, source_masks, inputs_student_index.detach(), decoder_masks, return_probs=True)
loss_teacher = teacher_model.batched_cost(targets_student_soft, decoder_masks, probs_teacher.detach())
loss = (1 - args.beta1) * loss_teacher + args.beta1 * loss_student # final results
else:
inputs_student_index, _, targets_student_soft, _, _, _, encoding_teacher, _ = model.quick_prepare(batch, False, decoding, probs, decoder_masks, decoder_masks, source_masks)
out_teacher, probs_teacher = teacher_model(encoding_teacher, source_masks, inputs_student_index.detach(), decoder_masks, return_probs=True)
loss_teacher = teacher_model.batched_cost(targets_student_soft, decoder_masks, probs_teacher.detach())
inputs_student_index, _ = model.prepare_inputs(batch, decoding_b, False, decoder_masks_b)
targets_student_soft, _ = model.prepare_targets(batch, probs_b, False, decoder_masks_b)
out_teacher, probs_teacher = teacher_model(encoding_teacher, source_masks, inputs_student_index.detach(), decoder_masks_b, return_probs=True)
_, loss_1= teacher_model.batched_cost(targets_student_soft, decoder_masks_b, probs_teacher.detach(), True)
inputs_student_index, _ = model.prepare_inputs(batch, decoding_r, False, decoder_masks_r)
targets_student_soft, _ = model.prepare_targets(batch, probs_r, False, decoder_masks_r)
out_teacher, probs_teacher = teacher_model(encoding_teacher, source_masks, inputs_student_index.detach(), decoder_masks_r, return_probs=True)
_, loss_2= teacher_model.batched_cost(targets_student_soft, decoder_masks_r, probs_teacher.detach(), True)
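# REINFORCE reward: how much the sampled fertilities improve the teacher's loss
# over the mean-fertility baseline (loss_1 - loss_2); centered and masked before
# reinforcing the sampled fertilities.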
rewards = -(loss_2 - loss_1).data
# if rewards.size(0) != 1:
rewards = rewards - rewards.mean() # ) / (rewards.std() + TINY)
rewards = rewards.expand_as(source_masks)
rewards = rewards * source_masks
# print(model.predictor.saved_fertilities)
# print(batch.src.size())
model.predictor.saved_fertilities.reinforce(0.1 * rewards.contiguous().view(-1, 1))
loss = (1 - args.beta1) * loss_teacher + args.beta1 * loss_student #+ 0 * model.predictor.saved_fertilities.float().sum() # detect reinforce
# loss = 0 * model.predictor.saved_fertilities.float().sum() # detect reinforce
# accmulate the training metrics
train_metrics.accumulate(batch_size, loss, print_iter=None)
train_metrics.reset()
# train the student
if args.preordering and args.fertility_mode == 'reinforce':
torch.autograd.backward((loss, model.predictor.saved_fertilities),
(torch.ones(1).cuda(loss.get_device()), None))
else:
loss.backward()
# torch.nn.utils.clip_grad_norm(model.parameters(), 1)
opt.step()
info = 'training step={}, loss={:.3f}, lr={:.5f}'.format(iters, export(loss), opt.param_groups[0]['lr'])
if args.word_dist:
info += '| NA:{:.3f}, AR:{:.3f}'.format(export(loss_student), export(loss_teacher))
if args.trainable_teacher and (args.max_cache <= 0):
loss_alter, loss_worse = export(loss_alter), export(loss_worse)
info += '| AL:{:.3f}, WO:{:.3f}'.format(loss_alter, loss_worse)
if args.preordering:
info += '| RE:{:.3f}'.format(export(reordering_cost))
if args.fertility_mode == 'reinforce':
info += '| RL: {:.3f}'.format(export(rewards.mean()))
if args.max_cache > 0:
info += '| caches={}'.format(len(caches.cache))
if args.tensorboard and (not args.debug):
writer.add_scalar('train/Loss', export(loss), iters)
progressbar.update(1)
progressbar.set_description(info)
# continue-training the teacher model
if args.trainable_teacher:
if args.max_cache > 0:
caches.add([batch.src, batch.trg, batch.dec, decoding]) # experience-reply
# trainable teacher: used old experience to train
if (iters+1) % args.replay_every == 0:
# ---set-up a new progressor: teacher training--- #
progressbar_teacher = tqdm(total=args.replay_times, desc='start training the teacher.')
for j in range(args.replay_times):
opt_teacher.param_groups[0]['lr'] = get_learning_rate(iters + 1)
opt_teacher.zero_grad()
src, trg, dec, decoding = caches.sample()
batch = Batch(src, trg, dec)
inputs, input_masks, targets, target_masks, sources, source_masks, encoding_teacher, batch_size = teacher_model.quick_prepare(batch, (not args.teacher_use_real))
inputs_students, _ = teacher_model.prepare_inputs(batch, decoding, masks=input_masks)
loss_alter = teacher_model.cost(targets, target_masks, out=teacher_model(encoding_teacher, source_masks, inputs_students, input_masks))
loss_worse = teacher_model.cost(targets, target_masks, out=teacher_model(encoding_teacher, source_masks, inputs, input_masks))
loss2 = loss_alter + loss_worse
loss2.backward()
opt_teacher.step()
info = 'teacher step={}, loss={:.3f}, alter={:.3f}, worse={:.3f}'.format(j, export(loss2), export(loss_alter), export(loss_worse))
progressbar_teacher.update(1)
progressbar_teacher.set_description(info)
progressbar_teacher.close()
# except Exception as e:
# logger.warn('caught an exception: {}'.format(e))
def decode_model(model, train_real, dev_real, evaluate=True, decoding_path=None, names=['en', 'de', 'decode']):
if train_real is None:
logger.info('decoding from the development set. beamsize={}, alpha={}'.format(args.beam_size, args.alpha))
dev = dev_real
else:
logger.info('decoding from the training set. beamsize={}, alpha={}'.format(args.beam_size, args.alpha))
dev = train_real
dev.train = False # make the Iterator create Variables with volatile=True so no graph is built
progressbar = tqdm(total=sum([1 for _ in dev]), desc='start decoding')
model.eval()
if decoding_path is not None:
decoding_path = decoding_path.format(args.test_set if train_real is None else 'train')
handle_dec = open(decoding_path + '.{}'.format(names[2]), 'w')
handle_src = open(decoding_path + '.{}'.format(names[0]), 'w')
handle_trg = open(decoding_path + '.{}'.format(names[1]), 'w')
if args.output_fer:
handle_fer = open(decoding_path + '.{}'.format('fer'), 'w')
corpus_size = 0
src_outputs, trg_outputs, dec_outputs, timings = [], [], [], []
decoded_words, target_words, decoded_info = 0, 0, 0
attentions = None #{'source': None, 'target': None}
pad_id = model.decoder.field.vocab.stoi['<pad>']
eos_id = model.decoder.field.vocab.stoi['<eos>']
curr_time = 0
for iters, dev_batch in enumerate(dev):
start_t = time.time()
inputs, input_masks, targets, target_masks, sources, source_masks, encoding, batch_size = model.quick_prepare(dev_batch)
if args.model is FastTransformer:
decoder_inputs, input_reorder, decoder_masks, _ = model.prepare_initial(encoding, sources, source_masks, input_masks,
None, None, decoding=True, mode=args.fertility_mode)
else:
decoder_inputs, decoder_masks = inputs, input_masks
decoding = model(encoding, source_masks, decoder_inputs, decoder_masks, beam=args.beam_size, alpha=args.alpha, decoding=True, feedback=attentions)
used_t = time.time() - start_t
curr_time += used_t
real_mask = 1 - ((decoding.data == eos_id) + (decoding.data == pad_id)).float()
outputs = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', decoding)]]
def DHondt(approx, mask):
# Highest-averages allocation: distribute the target length across source
# positions in proportion to their (approximate) attention mass.
L = mask.size(1)
w = torch.arange(1, 2 * L, 2)
if approx.is_cuda:
w = w.cuda(approx.get_device())
w = 1 / w # divisors 1, 1/3, 1/5, ...
approx = approx[:, :, None] @ w[None, :] # B x Ts x Tt
approx = approx.view(approx.size(0), -1) # B x (Ts x Tt)
approx_index = approx.topk(L, 1)[1] # B x Tt (index)
fertility = approx.new(*approx.size()).fill_(0).scatter_(1, approx_index, mask)
fertility = fertility.contiguous().view(mask.size(0), -1, mask.size(1)).sum(2).long()
return fertility
def cutoff(s, t):
for i in range(len(s), 0, -1):
if s[i-1] != t:
return s[:i]
raise IndexError
if args.output_fer:
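# Estimate each source token's fertility as the total attention mass it receives,
# then round to non-negative integers that sum to the target length via DHondt above.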
source_attention = attentions['source'].data.mean(1).transpose(2, 1) # B x Ts x Tt
source_attention *= real_mask[:, None, :]
approx_fertility = source_attention.sum(2) # B x Ts
fertility = DHondt(approx_fertility, real_mask)
corpus_size += batch_size
src_outputs += outputs[0]
trg_outputs += outputs[1]
dec_outputs += outputs[2]
timings += [used_t]
if decoding_path is not None:
for s, t, d in zip(outputs[0], outputs[1], outputs[2]):
if args.no_bpe:
s, t, d = s.replace('@@ ', ''), t.replace('@@ ', ''), d.replace('@@ ', '')
print(s, file=handle_src, flush=True)
print(t, file=handle_trg, flush=True)
print(d, file=handle_dec, flush=True)
if args.output_fer:
with torch.cuda.device_of(fertility):
fertility = fertility.tolist()
for f in fertility:
f = ' '.join([str(fi) for fi in cutoff(f, 0)])
print(f, file=handle_fer, flush=True)
progressbar.update(1)
progressbar.set_description('finishing sentences={}/batches={}, speed={} sec/batch'.format(corpus_size, iters, curr_time / (1 + iters)))
if evaluate:
corpus_gleu = computeGLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
corpus_bleu = computeBLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
logger.info("The dev-set corpus GLEU = {}".format(corpus_gleu))
logger.info("The dev-set corpus BLEU = {}".format(corpus_bleu))
computeGroupBLEU(dec_outputs, trg_outputs, tokenizer=tokenizer)
torch.save([src_outputs, trg_outputs, dec_outputs, timings], './space/data.pt')
def noisy_decode_model(model, dev_real, samples=1, alpha=1, tau=1, teacher_model=None, evaluate=True,
decoding_path=None, names=['en', 'de', 'decode'], saveall=False):
assert type(model) is FastTransformer, 'only works for fastTransformer'
logger.info('decoding from the development set. beamsize={}, alpha={}, tau={}'.format(args.beam_size, args.alpha, args.temperature))
dev = dev_real
progressbar = tqdm(total=sum([1 for _ in dev]), desc='start decoding')
model.eval()
teacher_model.eval()
if decoding_path is not None:
decoding_path = decoding_path.format(args.test_set if train_real is None else 'train')
handle_dec = open(decoding_path + '.{}'.format(names[2]), 'w')
handle_src = open(decoding_path + '.{}'.format(names[0]), 'w')
handle_trg = open(decoding_path + '.{}'.format(names[1]), 'w')
# if saveall:
# handle_fer = open(decoding_path + '.{}'.format(names[3]), 'w')
corpus_size = 0
src_outputs, trg_outputs, dec_outputs, timings = [], [], [], []
all_dec_outputs = []
decoded_words, target_words, decoded_info = 0, 0, 0
attentions = None #{'source': None, 'target': None}
pad_id = model.decoder.field.vocab.stoi['<pad>']
eos_id = model.decoder.field.vocab.stoi['<eos>']
curr_time = 0
for iters, dev_batch in enumerate(dev):
start_t = time.time()
inputs, input_masks, targets, target_masks, sources, source_masks0, encoding0, batch_size = model.quick_prepare(dev_batch)
if teacher_model is not None:
encoding_teacher = teacher_model.encoding(sources, source_masks0)
batch_size, src_len, hsize = encoding0[0].size()
if samples > 1:
source_masks = source_masks0[:, None, :].expand(batch_size, samples,
src_len).contiguous().view(batch_size * samples, src_len)
encoding = [None for _ in encoding0]
for i in range(len(encoding)):
encoding[i] = encoding0[i][:, None, :].expand(
batch_size, samples, src_len, hsize).contiguous().view(batch_size * samples, src_len, hsize)
if teacher_model is not None:
for i in range(len(encoding)):
encoding_teacher[i] = encoding_teacher[i][:, None, :].expand(
batch_size, samples, src_len, hsize).contiguous().view(batch_size * samples, src_len, hsize)
def parallel():
decoder_inputs, input_reorder, decoder_masks, logits_fer = model.prepare_initial(encoding0, sources, source_masks0, input_masks,
None, None, decoding=True, mode=args.fertility_mode, N=samples, tau=tau)
if teacher_model is not None:
decoding = model(encoding, source_masks, decoder_inputs, decoder_masks, decoding=True, feedback=attentions)
student_inputs, _ = teacher_model.prepare_inputs(dev_batch, decoding, decoder_masks)
student_targets, _ = teacher_model.prepare_targets(dev_batch, decoding, decoder_masks)
out, probs = teacher_model(encoding_teacher, source_masks, student_inputs, decoder_masks, return_probs=True, decoding=False)
_, teacher_loss = model.batched_cost(student_targets, decoder_masks, probs, batched=True) # student-loss (MLE)
# reranking the translation
teacher_loss = teacher_loss.view(batch_size, samples)
decoding = decoding.view(batch_size, samples, -1)
lp = decoder_masks.sum(1).view(batch_size, samples) ** (1 - alpha)
teacher_loss = teacher_loss * Variable(lp)
return decoding, teacher_loss, input_reorder
if args.multi_run > 1:
decodings, teacher_losses, _ = zip(*[parallel() for _ in range(args.multi_run)])
maxl = max([d.size(2) for d in decodings])
decoding = Variable(sources.data.new(batch_size, samples * args.multi_run, maxl).fill_(1).long())
for i, d in enumerate(decodings):
decoding[:, i * samples: (i+1) * samples, :d.size(2)] = d
teacher_loss = torch.cat(teacher_losses, 1)
else:
decoding, teacher_loss, input_reorder = parallel()
all_dec_outputs += [(decoding.view(batch_size * samples, -1), input_reorder)]
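# Rerank: for every source sentence keep the sampled translation with the lowest
# length-normalized teacher loss.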
selected_idx = (-teacher_loss).topk(1, 1)[1] # batch x 1
decoding = decoding.gather(1, selected_idx[:, :, None].expand(batch_size, 1, decoding.size(-1)))[:, 0, :]
used_t = time.time() - start_t
curr_time += used_t
real_mask = 1 - ((decoding.data == eos_id) + (decoding.data == pad_id)).float()
outputs = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', decoding)]]
corpus_size += batch_size
src_outputs += outputs[0]
trg_outputs += outputs[1]
dec_outputs += outputs[2]
timings += [used_t]
if decoding_path is not None:
for s, t, d in zip(outputs[0], outputs[1], outputs[2]):
if args.no_bpe:
s, t, d = s.replace('@@ ', ''), t.replace('@@ ', ''), d.replace('@@ ', '')
print(s, file=handle_src, flush=True)
print(t, file=handle_trg, flush=True)
print(d, file=handle_dec, flush=True)
# if saveall:
# for d, f in all_dec_outputs:
# ds = model.output_decoding(('trg', d))
# fs = model.output_decoding(('src', f))
# for dd, ff in zip(ds, fs):
# print(dd, file=handle_fer, flush=True)
# print(ff, file=handle_fer, flush=True)
progressbar.update(1)
progressbar.set_description('finishing sentences={}/batches={} speed={} sec/batch'.format(corpus_size, iters, curr_time / (1 + iters)))
if evaluate:
corpus_gleu = computeGLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
corpus_bleu = computeBLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
logger.info("The dev-set corpus GLEU = {}".format(corpus_gleu))
logger.info("The dev-set corpus BLEU = {}".format(corpus_bleu))
computeGroupBLEU(dec_outputs, trg_outputs, tokenizer=tokenizer)
torch.save([src_outputs, trg_outputs, dec_outputs, timings], './space/data.pt')
def self_improving_model(model, train, dev):
if args.tensorboard and (not args.debug):
from tensorboardX import SummaryWriter
writer = SummaryWriter('./runs/self-{}'.format(args.prefix+hp_str))
# optimizer
if args.optimizer == 'Adam':
opt = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], betas=(0.9, 0.98), eps=1e-9)
if args.trainable_teacher:
opt_teacher = torch.optim.Adam([p for p in teacher_model.parameters() if p.requires_grad], betas=(0.9, 0.98), eps=1e-9)
elif args.optimizer == 'RMSprop':
opt = torch.optim.RMSprop([p for p in model.parameters() if p.requires_grad], eps=1e-9)
if args.trainable_teacher:
opt_teacher = torch.optim.RMSprop([p for p in teacher_model.parameters() if p.requires_grad], eps=1e-9)
else:
raise NotImplementedError
# if resume training --
if (args.load_from is not None) and (args.resume):
with torch.cuda.device(args.gpu): # very important.
offset, opt_states = torch.load('./models/' + args.load_from + '.pt.states',
map_location=lambda storage, loc: storage.cuda())
opt.load_state_dict(opt_states)
else:
offset = 0
# metrics
best = Best(max, 'corpus_bleu', 'corpus_gleu', 'gleu', 'loss', 'i', model=model, opt=opt, path=model_name, gpu=args.gpu)
train_metrics = Metrics('train', 'loss', 'real', 'fake')
dev_metrics = Metrics('dev', 'loss', 'gleu', 'real_loss', 'fake_loss', 'distance', 'alter_loss', 'distance2', 'reordering_loss', 'corpus_gleu')
progressbar = tqdm(total=args.eval_every, desc='start training.')
# cache
samples = 100
tau = 1
caches = Cache(args.max_cache, ['src', 'trg', 'dec', 'fer'])
best_model = copy.deepcopy(model) # used for decoding
best_score = 0
# start loop
iters = offset
train = iter(train)
counters = 0
while iters <= args.maximum_steps:
iters += 1
counters += 1
batch = devol(next(train))
# prepare inputs
model.eval()
inputs, input_masks, \
targets, target_masks, \
sources, source_masks0, encoding, batch_size = model.quick_prepare(batch)
_, src_len, hsize = encoding[0].size()
trg_len = targets.size(1)
# prepare parallel -- noisy sampling
decoder_inputs, input_reorder, decoder_masks, _, pred_fer \
= model.prepare_initial(encoding, sources, source_masks0, input_masks,
None, None, decoding=True, mode='reinforce',
N=samples, tau=tau, return_samples=True)
# repeating for decoding
source_masks = source_masks0[:, None, :].expand(batch_size, samples,
src_len).contiguous().view(batch_size * samples, src_len)
for i in range(len(encoding)):
encoding[i] = encoding[i][:, None, :].expand(
batch_size, samples, src_len, hsize).contiguous().view(batch_size * samples, src_len, hsize)
# run decoding
decoding, _, probs = best_model(encoding, source_masks, decoder_inputs, decoder_masks, decoding=True, return_probs=True)
# compute GLEU score to select the best translation
trg_output = best_model.output_decoding(('trg', targets[:, None, :].expand(batch_size,
samples, trg_len).contiguous().view(batch_size * samples, trg_len)))
dec_output = best_model.output_decoding(('trg', decoding))
bleu_score = computeBLEU(dec_output, trg_output, corpus=False, tokenizer=tokenizer).contiguous().view(batch_size, samples).cuda(args.gpu)
best_index = bleu_score.max(1)[1]
def index_gather(data, index, samples):
batch_size = index.size(0)
data = data.contiguous().view(batch_size, samples, -1) # batch x samples x dim
index = index[:, None, None].expand(batch_size, 1, data.size(2))
return data.gather(1, index)[:, 0, :]
best_decoding, best_decoder_masks, best_fertilities = [index_gather(x, best_index, samples) for x in [decoding, decoder_masks, pred_fer]]
caches.add([sources, targets, best_decoding, best_fertilities],
[source_masks0, target_masks, best_decoder_masks, source_masks0],
['src', 'trg', 'dec', 'fer'])
progressbar.update(1)
progressbar.set_description('caching sentences={}/batches={}'.format(len(caches.cache), iters))
if counters == args.eval_every:
logger.info('build a new dataset from the caches')
print(len(caches.cache))
cache_data = ParallelDataset(examples=caches.cache,
fields=[('src', SRC), ('trg', TRG), ('dec', TRG), ('fer', FER)])
cache_iter = data.BucketIterator(cache_data, batch_size=2048, device=args.gpu, batch_size_fn=batch_size_fn)
print('done')
import sys;sys.exit(1)
if False: # iters % args.eval_every == 0:
progressbar.close()
dev_metrics.reset()
outputs_data = valid_model(model, dev, None if args.seq_dist else dev_metrics, teacher_model=None, print_out=True)
if args.tensorboard and (not args.debug):
writer.add_scalar('dev/GLEU_sentence_', dev_metrics.gleu, iters)
writer.add_scalar('dev/Loss', dev_metrics.loss, iters)
writer.add_scalar('dev/GLEU_corpus_', outputs_data['corpus_gleu'], iters)
writer.add_scalar('dev/BLEU_corpus_', outputs_data['corpus_bleu'], iters)
if not args.debug:
best.accumulate(outputs_data['corpus_bleu'], outputs_data['corpus_gleu'], dev_metrics.gleu, dev_metrics.loss, iters)
logger.info('the best model is achieved at {}, average greedy GLEU={}, corpus GLEU={}, corpus BLEU={}'.format(
best.i, best.gleu, best.corpus_gleu, best.corpus_bleu))
logger.info('model:' + args.prefix + hp_str)
# ---set-up a new progressor---
progressbar = tqdm(total=args.eval_every, desc='start training.')
if args.mode == 'train':
logger.info('starting training')
train_model(model, train_real, dev_real, teacher_model)
elif args.mode == 'self':
logger.info('starting self-training')
self_improving_model(model, train_real, dev_real)
elif args.mode == 'test':
logger.info('starting decoding from the pre-trained model, test...')
names = ['dev.src.b{}={}.{}'.format(args.beam_size, args.load_from, args.fertility_mode),
'dev.trg.b{}={}.{}'.format(args.beam_size, args.load_from, args.fertility_mode),
'dev.dec.b{}={}.{}'.format(args.beam_size, args.load_from, args.fertility_mode)]
decode_model(model, None, dev_real, evaluate=True, decoding_path=decoding_path if not args.no_write else None, names=names)
elif args.mode == 'test_noisy':
logger.info('starting decoding from the pre-trained model, test...')
names = ['dev.src.b{}={}.noise{}'.format(args.beam_size, args.load_from, args.beam_size),
'dev.trg.b{}={}.noise{}'.format(args.beam_size, args.load_from, args.beam_size),
'dev.dec.b{}={}.noise{}'.format(args.beam_size, args.load_from, args.beam_size),
'dev.fer.b{}={}.noise{}'.format(args.beam_size, args.load_from, args.beam_size)]
noisy_decode_model(model, dev_real, samples=args.beam_size, alpha=args.alpha, tau=args.temperature,
teacher_model=teacher_model, evaluate=True, decoding_path=decoding_path if not args.no_write else None,
names=names, saveall=True)
else:
logger.info('starting decoding from the pre-trained model, build the course dataset...')
names = ['src.b{}'.format(args.beam_size), 'trg.b{}'.format(args.beam_size), 'dec.b{}'.format(args.beam_size)]
decode_model(model, train_real, dev_real, decoding_path=decoding_path if not args.no_write else None, names=names)
logger.info("done.")
|
samples/virtual_gallery_tutorial/reset_tutorial_folder.py | jkabalar/kapture-localization | 118 | 12736018 | #!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
import os.path as path
import path_to_kapture_localization # noqa: F401
import kapture_localization.utils.path_to_kapture # noqa: F401
from kapture.utils.paths import safe_remove_any_path
HERE_PATH = path.normpath(path.dirname(__file__))
colmap_sfm_folder = path.join(HERE_PATH, 'colmap-sfm')
colmap_localization_folder = path.join(HERE_PATH, 'colmap-localization')
sift_colmap_vocab_tree_folder = path.join(HERE_PATH, 'sift_colmap_vocab_tree')
ir_bench_folder = path.join(HERE_PATH, 'image_retrieval_benchmark')
if path.isdir(colmap_sfm_folder):
safe_remove_any_path(colmap_sfm_folder, force=False)
if path.isdir(colmap_localization_folder):
safe_remove_any_path(colmap_localization_folder, force=False)
if path.isdir(sift_colmap_vocab_tree_folder):
safe_remove_any_path(sift_colmap_vocab_tree_folder, force=False)
if path.isdir(ir_bench_folder):
safe_remove_any_path(ir_bench_folder, force=False)
matches_no_gv_folder = path.join(HERE_PATH, 'local_features/r2d2_500/NN_no_gv')
if path.isdir(matches_no_gv_folder):
safe_remove_any_path(matches_no_gv_folder, force=False)
matches_colmap_gv_folder = path.join(HERE_PATH, 'local_features/r2d2_500/NN_colmap_gv')
if path.isdir(matches_colmap_gv_folder):
safe_remove_any_path(matches_colmap_gv_folder, force=False)
|
tools/perf/core/oauth_api.py | zipated/src | 2,151 | 12736089 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""API for generating OAuth2 access tokens from service account
keys predeployed to Chrome Ops bots via Puppet.
"""
import contextlib
import os
import subprocess
import tempfile
@contextlib.contextmanager
def with_access_token(service_account_json):
"""Yields an access token for the service account.
Args:
service_account_json: The path to the service account JSON file.
"""
fd, path = tempfile.mkstemp(suffix='.json', prefix='tok')
try:
args = ['luci-auth', 'token']
if service_account_json:
args += ['-service-account-json', service_account_json]
subprocess.check_call(args, stdout=fd)
os.close(fd)
fd = None
yield path
finally:
if fd is not None:
os.close(fd)
os.remove(path)
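# Usage sketch (illustrative only; the JSON path below is a placeholder, not part of this module).
# The context manager yields the path of a temp file holding the token written by `luci-auth token`:
#
#   with with_access_token('/path/to/service_account.json') as token_path:
#       with open(token_path) as f:
#           access_token = f.read().strip()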
|
contrib/cookiecutter/ckan_extension/{{cookiecutter.project}}/ckanext/{{cookiecutter.project_shortname}}/logic/action.py | gg2/ckan | 2,805 | 12736103 | <filename>contrib/cookiecutter/ckan_extension/{{cookiecutter.project}}/ckanext/{{cookiecutter.project_shortname}}/logic/action.py<gh_stars>1000+
import ckan.plugins.toolkit as tk
import ckanext.{{cookiecutter.project_shortname}}.logic.schema as schema
@tk.side_effect_free
def {{cookiecutter.project_shortname}}_get_sum(context, data_dict):
tk.check_access(
"{{cookiecutter.project_shortname}}_get_sum", context, data_dict)
data, errors = tk.navl_validate(
data_dict, schema.{{
cookiecutter.project_shortname}}_get_sum(), context)
if errors:
raise tk.ValidationError(errors)
return {
"left": data["left"],
"right": data["right"],
"sum": data["left"] + data["right"]
}
def get_actions():
return {
'{{cookiecutter.project_shortname}}_get_sum': {{
cookiecutter.project_shortname
}}_get_sum,
}
|
examples/optimizers/swarm/create_ssa.py | anukaal/opytimizer | 528 | 12736129 | <reponame>anukaal/opytimizer
from opytimizer.optimizers.swarm import SSA
# Creates a SSA optimizer
o = SSA()
|
condensa/compressor.py | stormymcstorm/condensa | 153 | 12736131 | <filename>condensa/compressor.py<gh_stars>100-1000
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn
class Compressor(object):
"""Condensa model compressor class."""
def __init__(self,
opt,
scheme,
model,
trainloader,
testloader,
valloader,
criterion):
"""
Creates a `Compressor` instance.
:param opt: Optimizer.
:type opt: `condensa.Optimizer`
:param scheme: Compression scheme (class).
:param model: PyTorch model.
:type model: `torch.nn.Module`
:param trainloader: Training dataloader.
:param testloader: Test dataloader.
:param valloader: Validation dataloader.
:param criterion: Loss criterion.
"""
assert isinstance(model, torch.nn.Module)
self.opt = opt
self.pi = scheme.pi
self.delta = scheme.delta
self.model = model
self.trainloader = trainloader
self.testloader = testloader
self.valloader = valloader
self.criterion = criterion
self._statistics = None
@property
def statistics(self):
"""
Retrieves compressed model statistics.
:return: Model statistics.
:rtype: `dict`
"""
return self._statistics
def run(self):
"""
Executes model compressor.
:return: Compressed model.
:rtype: `torch.nn.Module`
"""
w, statistics = self.opt.compress(self.model, self.pi, self.delta,
self.trainloader, self.testloader,
self.valloader, self.criterion)
self._statistics = statistics
return w
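# Minimal usage sketch (assumes `opt`, `scheme`, `model`, the dataloaders and `criterion`
# are already constructed elsewhere; the names here are placeholders, not part of this module):
#
#   compressor = Compressor(opt, scheme, model, trainloader, testloader, valloader, criterion)
#   compressed_model = compressor.run()
#   print(compressor.statistics)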
|
script/lib/sccache.py | LaudateCorpus1/libchromiumcontent | 315 | 12736134 | import os
import subprocess
import sys
from config import TOOLS_DIR
VERSION = 'aad2120'
SUPPORTED_PLATFORMS = {
'cygwin': 'windows',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'windows',
}
def is_platform_supported(platform):
return platform in SUPPORTED_PLATFORMS
def get_binary_path():
platform = sys.platform
if not is_platform_supported(platform):
return None
platform_dir = SUPPORTED_PLATFORMS[platform]
path = os.path.join(TOOLS_DIR, 'sccache', VERSION, platform_dir, 'sccache')
if platform_dir == 'windows':
path += '.exe'
return path
def run(*args):
binary_path = get_binary_path()
if binary_path is None:
raise Exception('No sccache binary found for the current platform.')
call_args = [binary_path] + list(args)
return subprocess.call(call_args)
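# Usage sketch: arguments are passed straight through to the bundled sccache binary.
# "--show-stats" is a standard sccache flag, shown here purely for illustration:
#   exit_code = run('--show-stats')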
|
tests/functional/test_index.py | zuiko42/picobrew_pico | 142 | 12736155 | <gh_stars>100-1000
from app import create_app
def test_home_page():
"""
GIVEN a Flask application configured for testing
WHEN the '/' page is requested (GET)
THEN check that the response is valid
"""
flask_app = create_app('flask_test.cfg')
# Create a test client using the Flask application configured for testing
with flask_app.test_client() as test_client:
response = test_client.get('/')
assert response.status_code == 200
assert b"<title>PicoBrew Server</title>" in response.data
|
tests/integration/test_user_feed.py | sourcery-ai-bot/tiktokpy | 324 | 12736177 | import pytest
from loguru import logger
from tiktokpy import TikTokPy
from tiktokpy.models.feed import FeedItem
@pytest.mark.asyncio()
async def test_user_feed(bot: TikTokPy):
feed = await bot.user_feed(username="@mileycyrus")
logger.info(feed)
assert len(feed) == 50
assert isinstance(feed[0], FeedItem)
|
benchmark/plot.py | sail-sg/envpool | 330 | 12736186 | <filename>benchmark/plot.py<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import matplotlib.ticker as ticker
import pandas as pd
import seaborn as sns
data = {}
def reset_data() -> None:
global data
data = {
"Num. Workers": [],
"FPS": [],
"Env": [],
"System": [],
"Method": [],
}
def parse_table(env: str, system: str, suffix: str) -> None:
private_copy = {
"Num. Workers": [],
"FPS": [],
"Env": [],
"System": [],
"Method": [],
}
sep = f"<!-- {env} - {system} -->"
raw = open("README.md").read().split(sep)[1].strip().splitlines()
worker_num = list(map(int, raw[0].split("|")[2:-1]))
for line in raw[2:]:
line = line.split("|")[1:-1]
method = line.pop(0).strip()
for w, f in zip(worker_num, line):
for d in [data, private_copy]:
d["Num. Workers"].append(w)
d["FPS"].append(None if f.strip() == "/" else float(f))
d["Env"].append(env)
d["System"].append(system)
d["Method"].append(method)
d = pd.DataFrame(private_copy)
plot = sns.lineplot(
x="Num. Workers", y="FPS", hue="Method", data=d, marker="o"
)
plot.xaxis.set_major_formatter(ticker.EngFormatter())
plot.yaxis.set_major_formatter(ticker.EngFormatter())
plot.legend(fontsize=9)
plot.set_title(f"{env} throughput, {system}")
plot.set_xlabel("Num. Workers")
frame_skip = {"Atari": 4, "Mujoco": 5}[env]
plot.set_ylabel(f"FPS, frameskip = {frame_skip}")
plot.get_figure().savefig(f"{env}_{system}.{suffix}")
plot.get_figure().clf()
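# For reference, parse_table() assumes README.md contains, between a pair of
# "<!-- Env - System -->" markers, a markdown table shaped roughly like the following
# (values are illustrative only; "/" marks a missing measurement):
#
#   | Method  | 1   | 2   | 4   |
#   | ------- | --- | --- | --- |
#   | MethodA | 100 | 190 | 360 |
#   | MethodB | /   | 150 | 280 |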
def benchmark(suffix: str) -> None:
global data
reset_data()
for env in ["Atari", "Mujoco"]:
for system in ["Laptop", "Workstation", "TPU-VM", "DGX-A100"]:
parse_table(env, system, suffix)
data = pd.DataFrame(data)
print(data.groupby(["Env", "Method", "System"]).max())
def mapping(x, y, **kwargs):
plot = sns.lineplot(x=x, y=y, **kwargs)
plot.xaxis.set_major_formatter(ticker.EngFormatter())
plot.yaxis.set_major_formatter(ticker.EngFormatter())
g = sns.FacetGrid(
data,
row="Env",
col="System",
hue="Method",
height=3,
aspect=1.6,
sharex=False,
sharey=False,
)
g.map(mapping, "Num. Workers", "FPS", marker="o")
g.add_legend(bbox_to_anchor=(0.52, 1.02), ncol=6)
axes = g.axes.flatten()
alphabet = "abcdefgh"
for ax, i in zip(axes, alphabet):
env, system = ax.get_title().split("|")
env = env.split("=")[-1].strip()
system = system.split("=")[-1].strip()
ax.set_title(f"({i}) {env} throughput, {system}")
if ax.get_ylabel():
frame_skip = {"Atari": 4, "Mujoco": 5}[env]
ax.set_ylabel(f"FPS, frameskip = {frame_skip}")
g.savefig(f"throughput.{suffix}")
if __name__ == "__main__":
pd.options.display.float_format = '{:,.0f}'.format
parser = argparse.ArgumentParser()
parser.add_argument("--suffix", type=str, default="png")
args = parser.parse_args()
benchmark(args.suffix)
|
open/core/betterself/models/__init__.py | lawrendran/open | 105 | 12736225 | <reponame>lawrendran/open
from open.utilities.importing_models import import_submodules
__all__ = import_submodules(__name__)
|
equip/visitors/classes.py | neuroo/equip | 102 | 12736254 | # -*- coding: utf-8 -*-
"""
equip.visitors.classes
~~~~~~~~~~~~~~~~~~~~~~
Callback the visit method for each encountered class in the program.
:copyright: (c) 2014 by <NAME> (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
class ClassVisitor(object):
"""
A class visitor that is triggered for all encountered ``TypeDeclaration``.
Example, listing all types declared in the bytecode::
class TypeDeclVisitor(ClassVisitor):
def __init__(self):
ClassVisitor.__init__(self)
def visit(self, typeDecl):
print "New type: %s (parentDecl=%s)" \\
% (typeDecl.type_name, typeDecl.parent)
"""
def __init__(self):
pass
def visit(self, typeDecl):
pass
|
mermaid/libraries/modules/stn_nd.py | HastingsGreer/mermaid | 120 | 12736257 | <filename>mermaid/libraries/modules/stn_nd.py
"""
This package implements spatial transformations in 1D, 2D, and 3D.
This is needed for the map-based registrations for example.
.. todo::
Implement CUDA version. There is already a 2D CUDA version available (in the source directory here).
But it needs to be extended to 1D and 3D. We also make use of a different convention for images which needs
to be accounted for, as we use the BxCxXxYxZ image format and BxdimxXxYxZ for the maps.
"""
#TODO
from torch.nn.modules.module import Module
########### TODO: temporarily commented out for torch1 compatibility
# from mermaid.libraries.functions.stn_nd import STNFunction_ND_BCXYZ, STNFunction_ND_BCXYZ_Compile
# from mermaid.libraries.functions.nn_interpolation import get_nn_interpolationf
################################################################3
from ..functions.stn_nd import STNFunction_ND_BCXYZ
from functools import partial
# class STN_ND(Module):
# """
# Legacy code for nD spatial transforms. Ignore for now. Implements spatial transforms, but in BXYZC format.
# """
# def __init__(self, dim):
# super(STN_ND, self).__init__()
# self.dim = dim
# """spatial dimension"""
# self.f = STNFunction_ND( self.dim )
# """spatial transform function"""
# def forward(self, input1, input2):
# """
# Simply returns the transformed input
#
# :param input1: image in BCXYZ format
# :param input2: map in BdimXYZ format
# :return: returns the transformed image
# """
# return self.f(input1, input2)
class STN_ND_BCXYZ(Module):
"""
Spatial transform code for nD spatial transoforms. Uses the BCXYZ image format.
"""
def __init__(self, spacing, zero_boundary=False,use_bilinear=True,use_01_input=True,use_compile_version=False):
super(STN_ND_BCXYZ, self).__init__()
self.spacing = spacing
"""spatial dimension"""
if use_compile_version:
if use_bilinear:
self.f = STNFunction_ND_BCXYZ_Compile(self.spacing,zero_boundary)
else:
self.f = partial(get_nn_interpolation,spacing = self.spacing)
else:
self.f = STNFunction_ND_BCXYZ( self.spacing,zero_boundary= zero_boundary,using_bilinear= use_bilinear,using_01_input = use_01_input)
"""spatial transform function"""
def forward(self, input1, input2):
"""
Simply returns the transformed input
:param input1: image in BCXYZ format
:param input2: map in BdimXYZ format
:return: returns the transformed image
"""
return self.f(input1, input2)
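# Rough usage sketch (2-D case; shapes and values are illustrative only, and `np` is
# assumed to be numpy imported by the caller):
#
#   stn = STN_ND_BCXYZ(spacing=np.array([1.0, 1.0]))
#   warped = stn(image, phi)  # image: B x C x X x Y, map phi: B x 2 x X x Y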
|
src/postClass.py | iamshnoo/TerminusBrowser | 104 | 12736278 | <filename>src/postClass.py
class Post():
def __init__(self, userIden, content, timestamp, image=None, score='0', replies=None):
self.userIden = userIden
self.content = content
self.timestamp = timestamp
self.image = image
self.score = score
self.replies = [] if not replies else replies
|
homeassistant/components/siren/const.py | mtarjoianu/core | 30,023 | 12736284 | <gh_stars>1000+
"""Constants for the siren component."""
from enum import IntEnum
from typing import Final
DOMAIN: Final = "siren"
ATTR_TONE: Final = "tone"
ATTR_AVAILABLE_TONES: Final = "available_tones"
ATTR_DURATION: Final = "duration"
ATTR_VOLUME_LEVEL: Final = "volume_level"
class SirenEntityFeature(IntEnum):
"""Supported features of the siren entity."""
TURN_ON = 1
TURN_OFF = 2
TONES = 4
VOLUME_SET = 8
DURATION = 16
# These constants are deprecated as of Home Assistant 2022.5
# Please use the SirenEntityFeature enum instead.
SUPPORT_TURN_ON: Final = 1
SUPPORT_TURN_OFF: Final = 2
SUPPORT_TONES: Final = 4
SUPPORT_VOLUME_SET: Final = 8
SUPPORT_DURATION: Final = 16
|
script/testing/artifact_stats/__main__.py | pmenon/noisepage | 971 | 12736293 | #!/usr/bin/env python3
import argparse
import logging
import sys
from ..reporting.report_result import report_artifact_stats_result
from ..util.constants import LOG, PERFORMANCE_STORAGE_SERVICE_API
from .base_artifact_stats_collector import BaseArtifactStatsCollector
from .collectors import * # Import necessary for __subclasses__ enumeration.
def collect_artifact_stats(collectors):
"""
Executes and combines the results of all the provided collectors.
Parameters
----------
collectors : [BaseArtifactStatsCollector]
Returns
-------
exit_code : int
The exit code of the collection task. 0 on success.
metrics : dict
The combined metrics from all of the collectors.
Meaningless if the exit code is not 0.
"""
exit_code, aggregated_metrics = 0, {}
try:
for collector_class in collectors:
with collector_class(is_debug=args.debug) as collector:
cname = collector.__class__.__name__
LOG.info(f'Starting {cname} collection.')
exit_code = collector.run_collector()
results = collector.metrics
duplicate_keys = set(aggregated_metrics).intersection(results)
if not duplicate_keys:
aggregated_metrics.update(results)
LOG.info(f'{cname} finished successfully.')
else:
exit_code = 1
LOG.error(f'Collector key conflict on {duplicate_keys}.')
LOG.error(f'{cname} failed. Stopping all collection.')
break
except Exception as err:
exit_code = 1 if exit_code == 0 else exit_code
LOG.error(err)
return exit_code, aggregated_metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug",
action="store_true",
dest="debug",
default=False,
help="Enable debug output")
parser.add_argument("--publish-results",
default="none",
type=str,
choices=PERFORMANCE_STORAGE_SERVICE_API.keys(),
help="Environment in which to store performance results")
parser.add_argument("--publish-username",
type=str,
help="Performance Storage Service Username")
parser.add_argument("--publish-password",
type=str,
help="Performance Storage Service password")
args = parser.parse_args()
if args.debug:
LOG.setLevel(logging.DEBUG)
# Get the BaseBinaryMetricsCollector subclasses imported from binary_metrics.binary_metrics_collectors
# Effectively this adds each binary metric collector class into an array to be instantiated later.
collectors = [obj for obj in BaseArtifactStatsCollector.__subclasses__()]
exit_code, aggregated_metrics = collect_artifact_stats(collectors)
if not exit_code:
LOG.info(f'Artifact stats: {aggregated_metrics}')
if args.publish_results != 'none':
report_artifact_stats_result(args.publish_results, aggregated_metrics,
args.publish_username, args.publish_password)
logging.shutdown()
sys.exit(exit_code)
|
tests/pipupgrade/test_exception.py | shidevil/pipupgrade | 517 | 12736325 | # imports - module imports
from pipupgrade.exception import (
PipupgradeError
)
# imports - test imports
import pytest
def test_pipupgrade_error():
with pytest.raises(PipupgradeError):
raise PipupgradeError |
vaeseq/codec.py | charlesincharge/vae-seq | 161 | 12736339 | # Copyright 2018 Google, Inc.,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules for encoding and decoding observations."""
import sonnet as snt
import tensorflow as tf
from . import batch_dist
from . import dist_module
from . import util
class EncoderSequence(snt.Sequential):
"""A wrapper arount snt.Sequential that also implements output_size."""
@property
def output_size(self):
return self.layers[-1].output_size
class FlattenEncoder(snt.AbstractModule):
"""Forwards the flattened input."""
def __init__(self, input_size=None, name=None):
super(FlattenEncoder, self).__init__(name=name)
self._input_size = None
if input_size is not None:
self._merge_input_sizes(input_size)
def _merge_input_sizes(self, input_size):
if self._input_size is None:
self._input_size = snt.nest.map(tf.TensorShape, input_size)
return
self._input_size = snt.nest.map(
lambda cur_size, inp_size: cur_size.merge_with(inp_size),
self._input_size,
input_size)
@property
def output_size(self):
"""Returns the output Tensor shapes."""
if self._input_size is None:
return tf.TensorShape([None])
flattened_size = 0
for inp_size in snt.nest.flatten(self._input_size):
num_elements = inp_size.num_elements()
if num_elements is None:
return tf.TensorShape([None])
flattened_size += num_elements
return tf.TensorShape([flattened_size])
def _build(self, inp):
input_sizes = snt.nest.map(lambda inp_i: inp_i.get_shape()[1:], inp)
self._merge_input_sizes(input_sizes)
flatten = snt.BatchFlatten(preserve_dims=1)
flat_inp = snt.nest.map(lambda inp_i: tf.to_float(flatten(inp_i)), inp)
ret = util.concat_features(flat_inp)
util.set_tensor_shapes(ret, self.output_size, add_batch_dims=1)
return ret
def MLPObsEncoder(hparams, name=None):
"""Observation -> encoded, flat observation."""
name = name or "mlp_obs_encoder"
mlp = util.make_mlp(hparams, hparams.obs_encoder_fc_layers,
name=name + "/mlp")
return EncoderSequence([FlattenEncoder(), mlp], name=name)
class DecoderSequence(dist_module.DistModule):
"""A sequence of zero or more AbstractModules, followed by a DistModule."""
def __init__(self, input_encoders, decoder, name=None):
super(DecoderSequence, self).__init__(name=name)
self._input_encoders = input_encoders
self._decoder = decoder
@property
def event_dtype(self):
return self._decoder.event_dtype
@property
def event_size(self):
return self._decoder.event_size
def dist(self, params, name=None):
return self._decoder.dist(params, name=name)
def _build(self, inputs):
if self._input_encoders:
inputs = snt.Sequential(self._input_encoders)(inputs)
return self._decoder(inputs)
def MLPObsDecoder(hparams, decoder, param_size, name=None):
"""Inputs -> decoder(obs; mlp(inputs))."""
name = name or "mlp_" + decoder.module_name
layers = hparams.obs_decoder_fc_hidden_layers + [param_size]
mlp = util.make_mlp(hparams, layers, name=name + "/mlp")
return DecoderSequence([util.concat_features, mlp], decoder, name=name)
class BernoulliDecoder(dist_module.DistModule):
"""Inputs -> Bernoulli(obs; logits=inputs)."""
def __init__(self, dtype=tf.int32, squeeze_input=False, name=None):
self._dtype = dtype
self._squeeze_input = squeeze_input
super(BernoulliDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return self._dtype
@property
def event_size(self):
return tf.TensorShape([])
def _build(self, inputs):
if self._squeeze_input:
inputs = tf.squeeze(inputs, axis=-1)
return inputs
def dist(self, params, name=None):
return tf.distributions.Bernoulli(
logits=params,
dtype=self._dtype,
name=name or self.module_name + "_dist")
class BetaDecoder(dist_module.DistModule):
"""Inputs -> Beta(obs; conc1, conc0)."""
def __init__(self,
positive_projection=None,
squeeze_input=False,
name=None):
self._positive_projection = positive_projection
self._squeeze_input = squeeze_input
super(BetaDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return tf.float32
@property
def event_size(self):
return tf.TensorShape([])
def _build(self, inputs):
conc1, conc0 = tf.split(inputs, 2, axis=-1)
if self._positive_projection is not None:
conc1 = self._positive_projection(conc1)
conc0 = self._positive_projection(conc0)
if self._squeeze_input:
conc1 = tf.squeeze(conc1, axis=-1)
conc0 = tf.squeeze(conc0, axis=-1)
return (conc1, conc0)
def dist(self, params, name=None):
conc1, conc0 = params
return tf.distributions.Beta(
conc1, conc0,
name=name or self.module_name + "_dist")
class _BinomialDist(tf.contrib.distributions.Binomial):
"""Work around missing functionality in Binomial."""
def __init__(self, total_count, logits=None, probs=None, name=None):
self._total_count = total_count
super(_BinomialDist, self).__init__(
total_count=tf.to_float(total_count),
logits=logits, probs=probs,
name=name or "Binomial")
def _log_prob(self, counts):
return super(_BinomialDist, self)._log_prob(tf.to_float(counts))
def _sample_n(self, n, seed=None):
all_counts = tf.to_float(tf.range(self._total_count + 1))
for batch_dim in range(self.batch_shape.ndims):
all_counts = tf.expand_dims(all_counts, axis=-1)
all_cdfs = tf.map_fn(self.cdf, all_counts)
shape = tf.concat([[n], self.batch_shape_tensor()], 0)
uniform = tf.random_uniform(shape, seed=seed)
return tf.foldl(
lambda acc, cdfs: tf.where(uniform > cdfs, acc + 1, acc),
all_cdfs,
initializer=tf.zeros(shape, dtype=tf.int32))
class BinomialDecoder(dist_module.DistModule):
"""Inputs -> Binomial(obs; total_count, logits)."""
def __init__(self, total_count=None, squeeze_input=False, name=None):
self._total_count = total_count
self._squeeze_input = squeeze_input
super(BinomialDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return tf.int32
@property
def event_size(self):
return tf.TensorShape([])
def _build(self, inputs):
if self._squeeze_input:
inputs = tf.squeeze(inputs, axis=-1)
return inputs
def dist(self, params, name=None):
return _BinomialDist(
self._total_count,
logits=params,
name=name or self.module_name + "_dist")
class CategoricalDecoder(dist_module.DistModule):
"""Inputs -> Categorical(obs; logits=inputs)."""
def __init__(self, dtype=tf.int32, name=None):
self._dtype = dtype
super(CategoricalDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return self._dtype
@property
def event_size(self):
return tf.TensorShape([])
def _build(self, inputs):
return inputs
def dist(self, params, name=None):
return tf.distributions.Categorical(
logits=params,
dtype=self._dtype,
name=name or self.module_name + "_dist")
class NormalDecoder(dist_module.DistModule):
"""Inputs -> Normal(obs; loc=half(inputs), scale=project(half(inputs)))"""
def __init__(self, positive_projection=None, name=None):
self._positive_projection = positive_projection
super(NormalDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return tf.float32
@property
def event_size(self):
return tf.TensorShape([])
def _build(self, inputs):
loc, scale = tf.split(inputs, 2, axis=-1)
if self._positive_projection is not None:
scale = self._positive_projection(scale)
return loc, scale
def dist(self, params, name=None):
loc, scale = params
return tf.distributions.Normal(
loc=loc,
scale=scale,
name=name or self.module_name + "_dist")
class BatchDecoder(dist_module.DistModule):
"""Wrap a decoder to model batches of events."""
def __init__(self, decoder, event_size, name=None):
self._decoder = decoder
self._event_size = tf.TensorShape(event_size)
super(BatchDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return self._decoder.event_dtype
@property
def event_size(self):
return self._event_size
def _build(self, inputs):
return self._decoder(inputs)
def dist(self, params, name=None):
return batch_dist.BatchDistribution(
self._decoder.dist(params, name=name),
ndims=self._event_size.ndims)
class GroupDecoder(dist_module.DistModule):
"""Group up decoders to model a set of independent of events."""
def __init__(self, decoders, name=None):
self._decoders = decoders
super(GroupDecoder, self).__init__(name=name)
@property
def event_dtype(self):
return snt.nest.map(lambda dec: dec.event_dtype, self._decoders)
@property
def event_size(self):
return snt.nest.map(lambda dec: dec.event_size, self._decoders)
def _build(self, inputs):
return snt.nest.map_up_to(
self._decoders,
lambda dec, input_: dec(input_),
self._decoders, inputs)
def dist(self, params, name=None):
with self._enter_variable_scope():
with tf.name_scope(name or "group"):
dists = snt.nest.map_up_to(
self._decoders,
lambda dec, param: dec.dist(param),
self._decoders, params)
return batch_dist.GroupDistribution(dists, name=name)
|
examples/whileloop.py | quynhanh-ngx/pytago | 206 | 12736391 | <reponame>quynhanh-ngx/pytago<filename>examples/whileloop.py
def main():
i = 0
while True:
print(i)
i += 1
if i > 5:
break
j = 10
while j < 100:
print(j)
j += 10
while 1:
print(j + i)
break
while 0.1:
print(j + i)
break
while 0:
print("This never executes")
while 0.0:
print("This never executes")
while None:
print("This never executes")
while False:
print("This never executes")
while "":
print("This never executes")
while "hi":
print("This executes")
break
if __name__ == '__main__':
main()
|
alg/knockoffgan/KnockoffGAN.py | loramf/mlforhealthlabpub | 171 | 12736434 | '''
KnockoffGAN Knockoff Variable Generation
<NAME> (9/27/2018)
'''
#%% Necessary Packages
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import logging
import argparse
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#%% KnockoffGAN Function
'''
Inputs:
x_train: Training data
lamda: Power network parameter = 0.01
mu: WGAN parameter = 1
'''
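# Usage sketch (x is an [n, d] numpy array; 'Normal' selects the Gaussian noise sampler;
# the values below are illustrative defaults -- see init_arg() for the CLI equivalents):
#
#   x_knockoff = KnockoffGAN(x, 'Normal', lamda=0.01, mu=1, mb_size=128, niter=2000)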
logger = logging.getLogger()
def KnockoffGAN (x_train, x_name, lamda = 0.01, mu = 1, mb_size=128, niter=2000):
tf_debug = False
if tf_debug:
run_opts = tf.RunOptions(report_tensor_allocations_upon_oom = True)
config = tf.ConfigProto()
config.log_device_placement=True
config.gpu_options.allow_growth = True
else:
run_opts = None
config = None
#%% Parameters
# 1. # of samples
n = len(x_train[:,0])
# 2. # of features
x_dim = len(x_train[0,:])
# 3. # of random dimensions
z_dim = int(x_dim)
# 4. # of hidden dimensions
h_dim = int(x_dim)
# 5. # of minibatch
# mb_size = 128
# 6. WGAN parameters
lam = 10
lr = 1e-4
#%% Necessary Functions
# 1. Xavier Initialization Definition
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
# 2. Sample from normal distribution: Random variable generation
def sample_Z(m, n, x_name):
if ((x_name == 'Normal') | (x_name == 'AR_Normal')):
return np.random.normal(0., np.sqrt(1./3000), size = [m, n]).copy()
elif ((x_name == 'Uniform') | (x_name == 'AR_Uniform')):
return np.random.uniform(-3*np.sqrt(1./3000),3*np.sqrt(1./3000),[m,n]).copy()
# 3. Sample from the real data (Mini-batch index sampling)
def sample_X(m, n):
return np.random.permutation(m)[:n].copy()
# 4. Permutation for MINE computation
def Permute (x):
n = len(x[:,0])
idx = np.random.permutation(n)
out = x[idx,:].copy()
return out
# 5. Bernoulli sampling for Swap and Hint variables
def sample_SH(m, n, p):
return np.random.binomial(1, p, [m,n]).copy()
#%% Placeholder inputs
# 1. Feature
X = tf.placeholder(tf.float32, shape = [None, x_dim])
# 2. Feature (Permute)
X_hat = tf.placeholder(tf.float32, shape = [None, x_dim])
# 3. Random Variable
Z = tf.placeholder(tf.float32, shape = [None, z_dim])
# 4. Swap
S = tf.placeholder(tf.float32, shape = [None, x_dim])
# 5. Hint
H = tf.placeholder(tf.float32, shape = [None, x_dim])
#%% Network Building
#%% 1. Discriminator
# Input: Swap (X, tilde X) and Hint
D_W1 = tf.Variable(xavier_init([x_dim + x_dim + x_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
D_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
#%% 2. WGAN Discriminator
# Input: tilde X
WD_W1 = tf.Variable(xavier_init([x_dim, h_dim]))
WD_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
WD_W2 = tf.Variable(xavier_init([h_dim,1]))
WD_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_WD = [WD_W1, WD_W2, WD_b1, WD_b2]
#%% 3. Generator
# Input: X and Z
G_W1 = tf.Variable(xavier_init([x_dim + z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
#%% 4. MINE
# Input: X and tilde X
# For X
M_W1A = tf.Variable(xavier_init([x_dim]))
M_W1B = tf.Variable(xavier_init([x_dim]))
M_b1 = tf.Variable(tf.zeros(shape=[x_dim]))
# For tilde X
M_W2A = tf.Variable(xavier_init([x_dim]))
M_W2B = tf.Variable(xavier_init([x_dim]))
M_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
# Combine
M_W3 = tf.Variable(xavier_init([x_dim]))
M_b3 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_M = [M_W1A, M_W1B, M_W2A, M_W2B, M_W3, M_b1, M_b2, M_b3]
#%% Functions
# 1. Generator
def generator(x, z):
inputs = tf.concat(axis=1, values = [x, z])
G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
G_out = (tf.matmul(G_h1, G_W2) + G_b2)
return G_out
# 2. Discriminator
def discriminator(sA, sB, h):
inputs = tf.concat(axis=1, values = [sA, sB, h])
D_h1 = tf.nn.tanh(tf.matmul(inputs, D_W1) + D_b1)
D_out = tf.nn.sigmoid(tf.matmul(D_h1, D_W2) + D_b2)
return D_out
# 3. WGAN Discriminator
def WGAN_discriminator(x):
WD_h1 = tf.nn.relu(tf.matmul(x, WD_W1) + WD_b1)
WD_out = (tf.matmul(WD_h1, WD_W2) + WD_b2)
return WD_out
# 4. MINE
def MINE(x, x_hat):
M_h1 = tf.nn.tanh(M_W1A * x + M_W1B * x_hat + M_b1)
M_h2 = tf.nn.tanh(M_W2A * x + M_W2B * x_hat + M_b2)
M_out = (M_W3 * (M_h1 + M_h2) + M_b3)
Exp_M_out = tf.exp(M_out)
return M_out, Exp_M_out
#%% Combination across the networks
# 1. Generater Knockoffs
G_sample = generator(X,Z)
# 2. WGAN Outputs for real and fake
WD_real = WGAN_discriminator(X)
WD_fake = WGAN_discriminator(G_sample)
# 3. Generate swapping (X, tilde X)
SwapA = S * X + (1-S) * G_sample
SwapB = (1-S) * X + S * G_sample
# 4. Discriminator output
# (X, tilde X) is SwapA, SwapB. Hint is generated by H * S
D_out = discriminator(SwapA, SwapB, H*S)
# 5. MINE Computation
# Without permutation
M_out, _ = MINE(X, G_sample)
# Wit permutation
_, Exp_M_out = MINE(X_hat, G_sample)
# 6. WGAN Loss Replacement of Clipping algorithm to Penalty term
# 1. Line 6 in Algorithm 1
eps = tf.random_uniform([mb_size, 1], minval = 0., maxval = 1.)
X_inter = eps*X + (1. - eps) * G_sample
# 2. Line 7 in Algorithm 1
grad = tf.gradients(WGAN_discriminator(X_inter), [X_inter])[0]
grad_norm = tf.sqrt(tf.reduce_sum((grad)**2 + 1e-8, axis = 1))
grad_pen = lam * tf.reduce_mean((grad_norm - 1)**2)
#%% Loss function
# 1. WGAN Loss
WD_loss = tf.reduce_mean(WD_fake) - tf.reduce_mean(WD_real) + grad_pen
# 2. Discriminator loss
D_loss = -tf.reduce_mean(S * (1-H) * tf.log(D_out + 1e-8) + (1-S) * (1-H) * tf.log(1 - D_out + 1e-8))
# 3. MINE Loss
M_loss = tf.reduce_sum( tf.reduce_mean(M_out, axis = 0) - tf.log(tf.reduce_mean(Exp_M_out, axis = 0)) )
# 4. Generator loss
G_loss = - D_loss + mu * -tf.reduce_mean(WD_fake) + lamda * M_loss
# Solver
WD_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(WD_loss, var_list = theta_WD))
D_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(D_loss, var_list = theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(G_loss, var_list = theta_G))
M_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(-M_loss, var_list = theta_M))
#%% Sessions
if tf_debug:
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer(), options=run_opts)
else:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#%% Iterations
for it in tqdm(range(niter)):
for dummy_range in range(5):
#%% WGAN, Discriminator and MINE Training
# Random variable generation
Z_mb = sample_Z(mb_size, z_dim, x_name)
# Minibatch sampling
X_idx = sample_X(n,mb_size)
X_mb = x_train[X_idx,:].copy()
X_perm_mb = Permute(X_mb)
# Swap generation
S_mb = sample_SH(mb_size, x_dim, 0.5)
# Hint generation
H_mb = sample_SH(mb_size, x_dim, 0.9)
# 1. WGAN Training
_, WD_loss_curr = sess.run([WD_solver, WD_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
# 2. Discriminator Training
# print('discriminator training')
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
# 3. MINE Training
# print('mine training')
_, M_loss_curr = sess.run([M_solver, M_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
#%% Generator Training
# Random variable generation
Z_mb = sample_Z(mb_size, z_dim, x_name)
# Minibatch sampling
X_idx = sample_X(n,mb_size)
X_mb = x_train[X_idx,:].copy()
X_perm_mb = Permute(X_mb)
# Swap generation
S_mb = sample_SH(mb_size, x_dim, 0.5)
# Hint generation
H_mb = sample_SH(mb_size, x_dim, 0.0)
# Generator training
# print('gen training')
_, G_loss_curr, G_sample_curr = sess.run([G_solver, G_loss, G_sample], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
#%% Output
#print('last session run')
X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)}, options=run_opts)[0]
# X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)})[0]
#print('closing session')
sess.close()
tf.reset_default_graph()
return X_knockoff
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument(
'-i')
parser.add_argument(
'-o')
parser.add_argument(
'--bs', default=128, type=int)
parser.add_argument(
'--it', default=2000, type=int)
parser.add_argument(
'--target')
parser.add_argument(
'--xname', default='Normal', help='Sample distribution [Normal, Uniform]')
parser.add_argument(
'--scale', default=1, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = init_arg()
df = pd.read_csv(args.i)
niter = args.it
use_scale = args.scale
x_name = args.xname
lbl = args.target
features = list(df.columns)
features.remove(lbl)
# scale/normalize dataset
range_scaler = (0, 1)
scaler = MinMaxScaler(feature_range=range_scaler)
x = df[features]
if use_scale:
scaler.fit(x)
x = scaler.transform(x)
else:
x = x.values
x_k = KnockoffGAN(
x,
x_name,
mb_size=args.bs,
niter=niter)
df_k = pd.DataFrame(x_k, columns=features)
df_k[lbl] = df[lbl]
df_k.to_csv(args.o, index=False)
|
src/inference.py | artem-oppermann/Deep-Autoencoders-For-Collaborative-Filtering | 111 | 12736435 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 19 13:39:52 2019
@author: <NAME>
"""
import tensorflow as tf
import os
from model.inference_model import InferenceModel
tf.app.flags.DEFINE_string('checkpoints_path', os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'checkpoints/')),
'Path for the test data.')
tf.app.flags.DEFINE_string('export_path_base', os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'model-export/')),
'Directory where to export the model.')
tf.app.flags.DEFINE_integer('model_version', 1, 'Version number of the model.')
tf.app.flags.DEFINE_integer('num_v', 3952,
'Number of visible neurons (Number of movies the users rated.)')
FLAGS = tf.app.flags.FLAGS
def run_inference():
inference_graph=tf.Graph()
with inference_graph.as_default():
model=InferenceModel(FLAGS)
input_data=tf.placeholder(tf.float32, shape=[None, 3952])
ratings=model.inference(input_data)
saver = tf.train.Saver()
with tf.Session(graph=inference_graph) as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoints_path)
saver.restore(sess, ckpt.model_checkpoint_path)
# Save the model
export_path = os.path.join(tf.compat.as_bytes(FLAGS.export_path_base),
tf.compat.as_bytes('model_v_%s'%str(FLAGS.model_version)))
print('Exporting trained model to %s'%export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
# create tensors info
predict_tensor_inputs_info = tf.saved_model.utils.build_tensor_info(input_data)
predict_tensor_scores_info = tf.saved_model.utils.build_tensor_info(ratings)
# build prediction signature
prediction_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={'inputs': predict_tensor_inputs_info},
outputs={'ratings': predict_tensor_scores_info},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
)
)
# save the model
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
'predict_ratings': prediction_signature
})
builder.save()
if __name__ == "__main__":
run_inference()
|
carbontracker/components/cpu/intel.py | leondz/carbontracker | 186 | 12736457 | <filename>carbontracker/components/cpu/intel.py
import os
import re
import time
from carbontracker.components.handler import Handler
# RAPL Literature:
# https://www.researchgate.net/publication/322308215_RAPL_in_Action_Experiences_in_Using_RAPL_for_Power_Measurements
RAPL_DIR = "/sys/class/powercap/"
CPU = 0
DRAM = 2
MEASURE_DELAY = 1
class IntelCPU(Handler):
def devices(self):
"""Returns the name of all RAPL Domains"""
return self._devices
def available(self):
return os.path.exists(RAPL_DIR) and bool(os.listdir(RAPL_DIR))
def power_usage(self):
before_measures = self._get_measurements()
time.sleep(MEASURE_DELAY)
after_measures = self._get_measurements()
# Ensure all power measurements >= 0 and retry up to 3 times.
attempts = 3
while attempts > 0:
attempts -= 1
power_usages = [
self._compute_power(before, after)
for before, after in zip(before_measures, after_measures)
]
if all(power >= 0 for power in power_usages):
return power_usages
default = [0.0 for device in range(len(self._devices))]
return default
def _compute_power(self, before, after):
"""Compute avg. power usage from two samples in microjoules."""
joules = (after - before) / 1000000
watt = joules / MEASURE_DELAY
return watt
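# Worked example: two energy_uj samples taken 1 s apart that differ by
# 12_000_000 microjoules correspond to 12 J consumed, i.e. an average draw of 12 W.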
def _read_energy(self, path):
with open(os.path.join(path, "energy_uj"), 'r') as f:
return int(f.read())
def _get_measurements(self):
measurements = []
for package in self._rapl_devices:
try:
power_usage = self._read_energy(os.path.join(
RAPL_DIR, package))
measurements.append(power_usage)
except FileNotFoundError:
# check cpu/gpu/dram
parts = [
f for f in os.listdir(os.path.join(RAPL_DIR, package))
if re.match(self.parts_pattern, f)
]
total_power_usage = 0
for part in parts:
total_power_usage += self._read_energy(
os.path.join(RAPL_DIR, package, part))
measurements.append(total_power_usage)
return measurements
def _convert_rapl_name(self, name, pattern):
if re.match(pattern, name):
return "cpu:" + name[-1]
def init(self):
# Get amount of intel-rapl folders
packages = list(filter(lambda x: ':' in x, os.listdir(RAPL_DIR)))
self.device_count = len(packages)
self._devices = []
self._rapl_devices = []
self.parts_pattern = re.compile(r"intel-rapl:(\d):(\d)")
devices_pattern = re.compile("intel-rapl:.")
for package in packages:
if re.fullmatch(devices_pattern, package):
with open(os.path.join(RAPL_DIR, package, "name"), "r") as f:
name = f.read().strip()
if name != "psys":
self._rapl_devices.append(package)
self._devices.append(
self._convert_rapl_name(package, devices_pattern))
def shutdown(self):
pass
|
plotdevice/lib/io.py | plotdevice/plotdevice | 110 | 12736469 | import objc, os, re
import cIO
for cls in ["AnimatedGif", "Pages", "SysAdmin", "Video"]:
globals()[cls] = objc.lookUpClass(cls)
### Session objects which wrap the GCD-based export managers ###
class ExportSession(object):
def __init__(self):
# state flags
self.running = True
self.cancelled = False
self.added = 0
self.written = 0
self.total = 0
# callbacks
self._complete = None
self._progress = None
self._status = None
# one of the cIO classes
self.writer = None
def begin(self, frames=None, pages=None):
from plotdevice.gui import set_timeout
self.total = frames if frames is not None else pages
self.poll = set_timeout(self, "update:", 0.1, repeat=True)
def update_(self, note):
self.written = self.writer.framesWritten()
if self._progress:
# let the delegate update the progress bar
goal = self.added if self.cancelled else self.total
self._progress(self.written, goal, self.cancelled)
if self.writer.doneWriting():
self.shutdown()
def next(self):
if self.cancelled or self.added==self.total:
return None
return self.added + 1
def cancel(self):
if self.cancelled:
return # be idem potent
self.cancelled = True
if self._status:
self._status('cancelled')
def done(self):
# if self._status:
# self._status('finishing')
if self.writer:
self.writer.closeFile()
def shutdown(self):
self.running = False
self._progress = None
if self._status:
self._status('complete')
self._status = None
if self.poll:
self.poll.invalidate()
self.poll = None
if self._complete:
self._complete()
self._complete = None
self.writer = None
def on(self, **handlers):
for event, cb in handlers.items():
setattr(self, '_'+event, cb)
if 'complete' in handlers and not self.running:
self.shutdown() # call the handler immediately
re_padded = re.compile(r'{(\d+)}')
class ImageExportSession(ExportSession):
def __init__(self, fname, format='pdf', first=1, last=None, single=False, **rest):
super(ImageExportSession, self).__init__()
self.single_file = single or first==last
if last is not None:
self.begin(pages=last-first+1)
self.format = format
m = re_padded.search(fname)
pad = '%%0%id' % int(m.group(1)) if m else None
if self.single_file:
# output a single file (potentially a multipage PDF)
if pad:
fname = re_padded.sub(pad%0, fname, count=1)
self.writer = Pages.alloc().initWithFile_(fname)
else:
# output multiple, sequentially-named files
if pad:
name_tmpl = re_padded.sub(pad, fname, count=1)
else:
basename, ext = os.path.splitext(fname)
name_tmpl = "".join([basename, '-%04d', ext])
self.writer = Pages.alloc().initWithPattern_(name_tmpl)
def add(self, canvas):
image = canvas._getImageData(self.format)
self.writer.addPage_(image)
self.added += 1
class MovieExportSession(ExportSession):
def __init__(self, fname, format='mov', first=1, last=None, fps=30, bitrate=1, loop=0, **rest):
super(MovieExportSession, self).__init__()
try:
os.unlink(fname)
except:
pass
if last is not None:
self.begin(frames=last-first+1)
self.fname = fname
self.format = format
self.fps = fps
self.loop = loop
self.bitrate = bitrate
def add(self, canvas):
image = canvas.rasterize()
if not self.writer:
dims = image.size()
if self.format == 'mov':
self.writer = Video.alloc()
self.writer.initWithFile_size_fps_bitrate_(self.fname, dims, self.fps, self.bitrate)
elif self.format == 'gif':
self.writer = AnimatedGif.alloc()
self.writer.initWithFile_size_fps_loop_(self.fname, dims, self.fps, self.loop)
else:
print 'unrecognized output format: %s' % self.format
return self.shutdown()
self.writer.addFrame_(image)
self.added += 1
|
model/multitask_v1/tdnn.py | LCF2764/tf-kaldi-speaker | 154 | 12736473 | # Build the speaker and phone networks.
# In this framework, they are both TDNNs with different settings.
# The speaker network is a hard-coded TDNN and the phone network is specified by the parameters.
# Of course, the speaker network can be modified (e.g. to a larger network). Meanwhile, the parameters for the
# phone network should be modified as well so that the architecture is consistent with the speaker network.
# TODO: we can make the speaker network also controlled by config file which is not too difficult.
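# A hypothetical config sketch for the phone branch (field names follow the params read
# below; the values are illustrative only and normally come from the model config file):
#   params.phone_kernel_size = [5, 5, 7, 1, 3]
#   params.num_shared_layers = 4
#   # the slicing in the speaker encoder assumes the phone left/right context is larger
#   # than the speaker left/right context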
import tensorflow as tf
from model.multitask_v1.pooling import statistics_pooling_v2
from model.common import l2_scaling, shape_list, prelu
def build_speaker_encoder(features, phone_labels, feature_length, params, endpoints, reuse_variables, is_training=False):
"""Build encoder for speaker latent variable.
Use the same tdnn network with x-vector.
Args:
features: the input features.
phone_labels: the phone labels (i.e. alignment). will be used in the future.
feature_length: the length of each feature.
params: the parameters.
endpoints: will be updated during building.
reuse_variables: if true, reuse the existing variables.
is_training: used in batchnorm
:return: sampled_zs, mu_zs, logvar_zs
"""
relu = tf.nn.relu
if "network_relu_type" in params.dict:
if params.network_relu_type == "prelu":
relu = prelu
if params.network_relu_type == "lrelu":
relu = tf.nn.leaky_relu
with tf.variable_scope("encoder", reuse=reuse_variables):
# Layer 1: [-2,-1,0,1,2] --> [b, 1, l-4, 512]
# conv2d + batchnorm + relu
features = tf.layers.conv2d(features,
512,
(1, 5),
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv1')
endpoints["conv1"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn1")
endpoints["bn1"] = features
features = relu(features, name='relu1')
endpoints["relu1"] = features
# Layer 2: [-2, -1, 0, 1, 2] --> [b ,1, l-4, 512]
# conv2d + batchnorm + relu
# This is slightly different with Kaldi which use dilation convolution
features = tf.layers.conv2d(features,
512,
(1, 5),
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv2')
endpoints["conv2"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn2")
endpoints["bn2"] = features
features = relu(features, name='relu2')
endpoints["relu2"] = features
# Layer 3: [-3, -2, -1, 0, 1, 2, 3] --> [b, 1, l-6, 512]
# conv2d + batchnorm + relu
# Still, use a non-dilation one
features = tf.layers.conv2d(features,
512,
(1, 7),
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv3')
endpoints["conv3"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn3")
endpoints["bn3"] = features
features = relu(features, name='relu3')
endpoints["relu3"] = features
# Convert to [b, l, 512]
features = tf.squeeze(features, axis=1)
# The output of the 3-rd layer can simply be rank 3.
endpoints["relu3"] = features
# Layer 4: [b, l, 512] --> [b, l, 512]
features = tf.layers.dense(features,
512,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="dense4")
endpoints["dense4"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn4")
endpoints["bn4"] = features
features = relu(features, name='relu4')
endpoints["relu4"] = features
# Layer 5: [b, l, x]
if "num_nodes_pooling_layer" not in params.dict:
# The default number of nodes before pooling
params.dict["num_nodes_pooling_layer"] = 1500
features = tf.layers.dense(features,
params.num_nodes_pooling_layer,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="dense5")
endpoints["dense5"] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn5")
endpoints["bn5"] = features
features = relu(features, name='relu5')
endpoints["relu5"] = features
# Here, we need to slice the feature since the original feature is expanded by the larger context between
# the speaker and phone contexts. We assume the phone context is the larger one,
# so the speaker network has to do the slicing.
if (params.speaker_left_context < params.phone_left_context and
params.speaker_right_context < params.phone_right_context):
features = features[:, params.phone_left_context - params.speaker_left_context:
params.speaker_right_context - params.phone_right_context, :]
else:
raise NotImplementedError("The speake and phone context is not supported now.")
# Make sure we've got the right feature
with tf.control_dependencies([tf.assert_equal(shape_list(features)[1], shape_list(phone_labels)[1])]):
# Pooling layer
# The length of utterances may be different.
# The original pooling uses all the frames, which is not appropriate for this case,
# so we create a new function rather than changing the original one.
if params.pooling_type == "statistics_pooling":
features = statistics_pooling_v2(features, feature_length, endpoints, params, is_training)
else:
raise NotImplementedError("Not implement %s pooling" % params.pooling_type)
endpoints['pooling'] = features
# Utterance-level network
# Layer 6: [b, 512]
features = tf.layers.dense(features,
512,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='dense6')
endpoints['dense6'] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn6")
endpoints["bn6"] = features
features = relu(features, name='relu6')
endpoints["relu6"] = features
# Layer 7: [b, x]
if "speaker_dim" not in params.dict:
# The default number of nodes in the last layer
params.dict["speaker_dim"] = 512
# We need mean and logvar.
mu = tf.layers.dense(features,
params.speaker_dim,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer),
name="zs_dense")
endpoints['zs_mu_dense'] = mu
if "spk_last_layer_no_bn" not in params.dict:
params.spk_last_layer_no_bn = False
if not params.spk_last_layer_no_bn:
mu = tf.layers.batch_normalization(mu,
momentum=params.batchnorm_momentum,
training=is_training,
name="zs_bn")
endpoints['zs_mu_bn'] = mu
if "spk_last_layer_linear" not in params.dict:
params.spk_last_layer_linear = False
if not params.spk_last_layer_linear:
mu = relu(mu, name="zs_mu_relu")
endpoints['zs_mu_relu'] = mu
# We do not compute logvar in this version.
# Set logvar=0 ==> var=1
logvar = 0
# epsilon = tf.random_normal(tf.shape(mu), name='zs_epsilon')
# sample = mu + tf.exp(0.5 * logvar) * epsilon
sample = mu
return sample, mu, logvar
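# NOTE: statistics_pooling_v2 used above is defined elsewhere in this project. The helper below is
# only a rough numpy sketch of conventional x-vector style statistics pooling (masked mean and
# stddev over time, concatenated); it is an assumption about its behaviour for illustration, not
# the actual implementation.
def _statistics_pooling_sketch(frames, valid_length, eps=1e-12):
    import numpy as np
    # frames: [num_frames, feat_dim]; valid_length: number of non-padded frames
    valid = frames[:valid_length]
    mean = valid.mean(axis=0)
    std = np.sqrt(np.maximum(valid.var(axis=0), eps))
    return np.concatenate([mean, std], axis=0)  # shape: [2 * feat_dim]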
def build_phone_encoder(features, speaker_labels, feature_length, params, endpoints, reuse_variables, is_training=False):
"""Build encoder for phone latent variable.
Use the tdnn and share the same structure in the lower layers.
Args:
features: the input features.
speaker_labels: the speaker labels (i.e. the speaker indices); may be used in the future.
feature_length: the length of each feature.
params: the parameters.
endpoints: will be updated during building.
reuse_variables: if true, reuse the existing variables
is_training: used in batchnorm.
:return: sampled_zs, mu_zs, logvar_zs
"""
relu = tf.nn.relu
if "network_relu_type" in params.dict:
if params.network_relu_type == "prelu":
relu = prelu
if params.network_relu_type == "lrelu":
relu = tf.nn.leaky_relu
# # This is moved to the model config file.
# # Acoustic network params:
# # Most share 4 layers with x-vector network.
# # [-2,2], [-2,2], [-3,3], [0], [-4,0,4]
# # The last fully-connected layer is appended as the phonetic embedding
# layer_size = [512, 512, 512, 512, 512]
# kernel_size = [5, 5, 7, 1, 3]
# dilation_size = [1, 1, 1, 1, 4]
num_layers = len(params.phone_kernel_size)
layer_index = 0
if params.num_shared_layers > 0:
# We may share the lower layers of the two tasks.
# Go through the shared layers between the speaker and phone networks.
assert params.num_shared_layers < num_layers
with tf.variable_scope("encoder", reuse=True):
for i in range(params.num_shared_layers):
if params.phone_kernel_size[layer_index] > 1:
if len(shape_list(features)) == 3:
# Add a dummy dim to support 2d conv
features = tf.expand_dims(features, axis=1)
features = tf.layers.conv2d(features,
params.phone_layer_size[layer_index],
(1, params.phone_kernel_size[layer_index]),
activation=None,
dilation_rate=(1, params.phone_dilation_size[layer_index]),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='conv%d' % (layer_index + 1))
elif params.phone_kernel_size[layer_index] == 1:
if len(shape_list(features)) == 4:
# Remove a dummy dim to do dense layer
features = tf.squeeze(features, axis=1)
features = tf.layers.dense(features,
params.phone_layer_size[layer_index],
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="dense%d" % (layer_index + 1))
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="bn%d" % (layer_index + 1))
features = relu(features, name='relu%d' % (layer_index + 1))
layer_index += 1
with tf.variable_scope("encoder_phone", reuse=reuse_variables):
# In the unshared part, the endpoints should be updated.
while layer_index < num_layers:
if params.phone_kernel_size[layer_index] > 1:
if len(shape_list(features)) == 3:
features = tf.expand_dims(features, axis=1)
features = tf.layers.conv2d(features,
params.phone_layer_size[layer_index],
(1, params.phone_kernel_size[layer_index]),
activation=None,
dilation_rate=(1, params.phone_dilation_size[layer_index]),
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name='phn_conv%d' % (layer_index + 1))
endpoints["phn_conv%d" % (layer_index + 1)] = features
elif params.phone_kernel_size[layer_index] == 1:
if len(shape_list(features)) == 4:
features = tf.squeeze(features, axis=1)
features = tf.layers.dense(features,
params.phone_layer_size[layer_index],
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
params.weight_l2_regularizer),
name="phn_dense%d" % (layer_index + 1))
endpoints["phn_dense%d" % (layer_index + 1)] = features
features = tf.layers.batch_normalization(features,
momentum=params.batchnorm_momentum,
training=is_training,
name="phn_bn%d" % (layer_index + 1))
endpoints["phn_bn%d" % (layer_index + 1)] = features
features = relu(features, name='phn_relu%d' % (layer_index + 1))
endpoints["phn_relu%d" % (layer_index + 1)] = features
layer_index += 1
# The last layer
if len(shape_list(features)) == 4:
features = tf.squeeze(features, axis=1)
# As in the speaker network, we may need to slice the features due to the different contexts of the
# speaker and phone networks. For now we assume the phone context is the larger one, which means
# no slicing is needed for the phone network.
if (params.speaker_left_context > params.phone_left_context and
params.speaker_right_context > params.phone_right_context):
raise NotImplementedError("The speake and phone context is not supported now.")
# features = features[:, params.speaker_left_context - params.phone_left_context:
# params.phone_right_context - params.speaker_right_context, :]
# # We do not validate the length because this will introduce the alignment -- phn_labels, which
# # is unnecessary when doing the phone inference.
# with tf.control_dependencies([tf.assert_equal(shape_list(features)[1], shape_list(self.phn_labels)[1])]):
# features = tf.identity(features)
if "phone_dim" not in params.dict:
params.dict["phone_dim"] = 512
mu = tf.layers.dense(features,
params.phone_dim,
activation=None,
kernel_regularizer=tf.contrib.layers.l2_regularizer(params.weight_l2_regularizer),
name="zp_dense")
endpoints['zp_mu_dense'] = mu
mu = tf.layers.batch_normalization(mu,
momentum=params.batchnorm_momentum,
training=is_training,
name="zp_bn")
endpoints['zp_mu_bn'] = mu
mu = relu(mu, name='zp_mu_relu')
endpoints['zp_mu_relu'] = mu
logvar = 0
# epsilon = tf.random_normal(tf.shape(mu), name='zp_epsilon')
# sample = mu + tf.exp(0.5 * logvar) * epsilon
sample = mu
return sample, mu, logvar
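# The commented-out "epsilon"/"sample" lines in both encoders refer to the usual VAE
# reparameterization trick. Below is a minimal numpy illustration of that trick (an assumption
# about what the disabled code would compute); note that with logvar fixed to 0 the disabled code
# would add unit-variance noise, whereas the active code above simply uses mu.
def _reparameterize_sketch(mu, logvar):
    import numpy as np
    epsilon = np.random.randn(*np.shape(mu))
    return mu + np.exp(0.5 * logvar) * epsilon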
|
Maths_And_Stats/Number_Theory/Sieve_of_Eratosthenes/prime_and_base.py | arslantalib3/algo_ds_101 | 182 | 12736501 | #include<bits/stdc++.h>
using namespace std;
char letterconverter(int number)
{
return 'A' + (number-10);
}
int main()
{
vector<int> primes;
int num, flag, x, base2, i, upto;
// Take input from user
cout << "Find prime numbers upto : ";
cin >> upto;
for(num = 2; num <= upto; num++)
{
flag = 0;
for(i = 2; i <= (num / 2); i++)
{
if(num % i == 0) {
flag = 1;
break;
}
}
// If the number is prime then store it in primes vector
if(flag == 0)
primes.push_back(num);
}
// Printing all primes between [2, N]
cout << endl << "All prime numbers upto " << upto << " are : " << endl;
for(i=0; i<primes.size(); i++)
{
cout<<primes[i]<<" ";
}
cout<<"\n";
cout << "What base do you want it to be?\n";
cin>>base2;
for (int j =0; j<primes.size(); j++)
{
string temp = "";
num = primes[j];
while(num > 0)
{
x = num % base2;
if(x <= 9)
{
temp += to_string(x);
}
else
{
temp += letterconverter(x);
}
num /= base2;
}
reverse(temp.begin(), temp.end());
cout<<temp<<" ";
}
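// Worked example of the conversion above: for the prime 29 with base2 = 16,
// 29 % 16 = 13 -> letterconverter(13) = 'D', then 29 / 16 = 1 and 1 % 16 = 1 -> '1',
// so temp = "D1", which prints as "1D" after reverse().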
} |
criterion/ResponseMap/bbox_regression/L1.py | zhangzhengde0225/SwinTrack | 143 | 12736507 |
import torch.nn as nn
from criterion.common.reduction.default import build_loss_reduction_function
from data.operator.bbox.spatial.vectorized.torch.cxcywh_to_xyxy import box_cxcywh_to_xyxy
def l1_loss_data_adaptor(pred, label, _):
predicted_bbox = pred['bbox']
if label is None:
return False, predicted_bbox.sum() * 0
(num_boxes_pos, target_bounding_box_label_matrix) = label
return True, (box_cxcywh_to_xyxy(predicted_bbox), box_cxcywh_to_xyxy(target_bounding_box_label_matrix))
def reduce_by_weight(loss, pred, label, context):
return ((loss * context['sample_weight'].unsqueeze(-1).expand(-1, 4)).reshape(-1)).sum() / 4
def build_L1(loss_parameters, *_):
l1_loss = nn.L1Loss(reduction='none')
if 'reduce' in loss_parameters and loss_parameters['reduce'] == 'weighted':
loss_reduce_function = reduce_by_weight
else:
loss_reduce_function = build_loss_reduction_function(loss_parameters)
return l1_loss, l1_loss_data_adaptor, loss_reduce_function
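# A small, self-contained sketch (not part of the original repo) of what reduce_by_weight computes:
# the per-box L1 loss has shape [N, 4], the sample weights have shape [N], and the reduction is
# sum_i w_i * mean_j(loss_ij).
if __name__ == '__main__':
    import torch
    loss = torch.tensor([[1., 2., 3., 4.], [4., 4., 4., 4.]])  # [N, 4]
    weight = torch.tensor([0.5, 2.0])                          # [N]
    reduced = ((loss * weight.unsqueeze(-1).expand(-1, 4)).reshape(-1)).sum() / 4
    # 0.5 * mean([1, 2, 3, 4]) + 2.0 * mean([4, 4, 4, 4]) = 1.25 + 8.0 = 9.25
    assert torch.isclose(reduced, torch.tensor(9.25))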
|
pygam/tests/test_terms.py | pjk645/pyGAM | 714 | 12736530 | # -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
import pytest
from pygam import *
from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList
from pygam.utils import flatten
@pytest.fixture
def chicago_gam(chicago_X_y):
X, y = chicago_X_y
gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
return gam
def test_wrong_length():
"""iterable params must all match lengths
"""
with pytest.raises(ValueError):
SplineTerm(0, lam=[0, 1, 2], penalties=['auto', 'auto'])
def test_num_coefs(mcycle_X_y, wage_X_y):
"""make sure this method gives correct values
"""
X, y = mcycle_X_y
term = Intercept().compile(X)
assert term.n_coefs == 1
term = LinearTerm(0).compile(X)
assert term.n_coefs == 1
term = SplineTerm(0).compile(X)
assert term.n_coefs == term.n_splines
X, y = wage_X_y
term = FactorTerm(2).compile(X)
assert term.n_coefs == 5
term_a = SplineTerm(0).compile(X)
term_b = SplineTerm(1).compile(X)
term = TensorTerm(term_a, term_b).compile(X)
assert term.n_coefs == term_a.n_coefs * term_b.n_coefs
def test_term_list_removes_duplicates():
"""prove that we remove duplicated terms"""
term = SplineTerm(0)
term_list = term + term
assert isinstance(term_list, TermList)
assert len(term_list) == 1
def test_tensor_invariance_to_scaling(chicago_gam, chicago_X_y):
"""a model with tensor terms should give results regardless of input scaling
"""
X, y = chicago_X_y
X[:, 3] = X[:, 3] * 100
gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
assert np.allclose(gam.coef_, chicago_gam.coef_, atol=1e-6)
def test_tensor_must_have_at_least_2_marginal_terms():
with pytest.raises(ValueError):
te(0)
def test_tensor_term_expands_args_to_match_penalties_and_terms():
tensor = te(0, 1, lam=3)
assert len(tensor.lam) == 2
assert len(flatten(tensor.lam)) == 2
tensor = te(0, 1, penalties='auto')
assert len(tensor.lam) == 2
assert len(flatten(tensor.lam)) == 2
tensor = te(0, 1, penalties=['auto', ['auto', 'auto']])
assert len(tensor.lam) == 2
assert len(flatten(tensor.lam)) == 3
def test_tensor_term_skips_kwargs_when_marginal_term_is_supplied():
tensor = te(0, s(1), n_splines=420)
assert tensor._terms[0].n_coefs == 420
assert tensor._terms[1].n_coefs != 420
def test_tensor_term_doesnt_accept_tensor_terms():
with pytest.raises(ValueError):
te(l(0), te(0, 1))
def test_tensor_args_length_must_agree_with_number_of_terms():
with pytest.raises(ValueError):
te(0, 1, lam=[3])
with pytest.raises(ValueError):
te(0, 1, lam=[3])
with pytest.raises(ValueError):
te(0, 1, lam=[3, 3, 3])
def test_build_from_info():
"""we can rebuild terms from info
"""
terms = [Intercept(),
LinearTerm(0),
SplineTerm(0),
FactorTerm(0),
TensorTerm(0,1)]
for term in terms:
assert Term.build_from_info(term.info) == term
assert te(0, 1) == TensorTerm(SplineTerm(0, n_splines=10), SplineTerm(1, n_splines=10))
def test_by_variable():
"""our fit on the toy tensor dataset with a by variable on the linear feature
should be similar to the fit with a tensor product of a spline with a linear
term
"""
pass
def test_by_variable_doesnt_exist_in_X(mcycle_X_y):
"""raises a value error if we cannot locate the by variable
"""
term = s(0, by=1)
with pytest.raises(ValueError):
term.compile(mcycle_X_y[0])
def test_term_list_from_info():
"""we can remake a term list from info
"""
term_list = SplineTerm(0) + LinearTerm(1)
assert Term.build_from_info(term_list.info) == term_list
def test_term_list_only_accepts_terms_or_term_list():
TermList()
with pytest.raises(ValueError):
TermList(None)
def test_pop_term_from_term_list():
term_list = SplineTerm(0) + LinearTerm(1) + Intercept()
term_list_2 = deepcopy(term_list)
# by default we pop the last
assert term_list_2.pop() == term_list[-1]
assert term_list_2.pop(0) == term_list[0]
with pytest.raises(ValueError):
term_list_2.pop(1) == term_list[0]
def test_no_multiply():
"""trying to multiply terms raises an error
"""
with pytest.raises(NotImplementedError):
SplineTerm(0) * LinearTerm(1)
term_list = SplineTerm(0) + LinearTerm(1)
with pytest.raises(NotImplementedError):
term_list * term_list
def test_by_is_similar_to_tensor_with_linear_term(toy_interaction_X_y):
"""for simple interactions we can acheive equivalent fits using:
- a spline with a by-variable
- a tensor between spline and a linear term
"""
X, y = toy_interaction_X_y
gam_a = LinearGAM(te(s(0, n_splines=20), l(1))).fit(X, y)
gam_b = LinearGAM(s(0, by=1)).fit(X, y)
r2_a = gam_a.statistics_['pseudo_r2']['explained_deviance']
r2_b = gam_b.statistics_['pseudo_r2']['explained_deviance']
assert np.allclose(r2_a, r2_b)
def test_correct_smoothing_in_tensors(toy_interaction_X_y):
"""check that smoothing penalties are correctly computed across the marginal
dimensions
feature 0 is the sinusoid, so this one needs to be wiggly
feature 1 is the linear function, so this can smoothed heavily
"""
X, y = toy_interaction_X_y
# increase smoothing on linear function heavily, to no detriment
gam = LinearGAM(te(0, 1, lam=[0.6, 10000])).fit(X, y)
assert gam.statistics_['pseudo_r2']['explained_deviance'] > 0.9
# smoothing the sinusoid function heavily reduces fit quality
gam = LinearGAM(te(0, 1, lam=[10000, 0.6])).fit(X, y)
assert gam.statistics_['pseudo_r2']['explained_deviance'] < 0.1
def test_dummy_encoding(wage_X_y, wage_gam):
"""check that dummy encoding produces fewer coefficients than one-hot"""
X, y = wage_X_y
gam = LinearGAM(s(0) + s(1) + f(2, coding='dummy')).fit(X, y)
assert gam._modelmat(X=X, term=2).shape[1] == 4
assert gam.terms[2].n_coefs == 4
assert wage_gam._modelmat(X=X, term=2).shape[1] == 5
assert wage_gam.terms[2].n_coefs == 5
def test_build_cyclic_p_spline(hepatitis_X_y):
"""check the cyclic p spline builds
the r2 for a cyclic gam on an obviously aperiodic function should suffer
"""
X, y = hepatitis_X_y
# unconstrained gam
gam = LinearGAM(s(0)).fit(X, y)
r_unconstrained = gam.statistics_['pseudo_r2']['explained_deviance']
# cyclic gam
gam = LinearGAM(s(0, basis='cp')).fit(X, y)
r_cyclic = gam.statistics_['pseudo_r2']['explained_deviance']
assert r_unconstrained > r_cyclic
def test_cyclic_p_spline_periodicity(hepatitis_X_y):
"""check the cyclic p spline behavioves periodically
namely:
- the value at the edge knots should be the same
- extrapolation should be periodic
"""
X, y = hepatitis_X_y
gam = LinearGAM(s(0, basis='cp')).fit(X, y)
# check periodicity
left = gam.edge_knots_[0][0]
right = gam.edge_knots_[0][1]
assert(gam.predict(left) == gam.predict(right))
# check extrapolation
further = right + (right - left)
assert(gam.predict(further) == gam.predict(right))
def test_cyclic_p_spline_custom_period():
"""show that we can set custom edge_knots, and that these affect our model's
performance
"""
# define square wave
X = np.linspace(0, 1, 5000)
y = X > 0.5
# when modeling the full period, we get close with a periodic basis
gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0)).fit(X, y)
assert np.allclose(gam.predict(X), y)
assert np.allclose(gam.edge_knots_[0], [0, 1])
# when modeling a non-periodic function, our periodic model fails
gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0, edge_knots=[0, 0.5])).fit(X, y)
assert np.allclose(gam.predict(X), 0.5)
assert np.allclose(gam.edge_knots_[0], [0, 0.5])
def test_tensor_terms_have_constraints(toy_interaction_X_y):
"""test that we can fit a gam with constrained tensor terms,
even if those constraints are 'none'
"""
X, y = toy_interaction_X_y
gam = LinearGAM(te(0, 1, constraints='none')).fit(X, y)
assert gam._is_fitted
assert gam.terms.hasconstraint
def test_tensor_composite_constraints_equal_penalties():
"""check that the composite constraint matrix for a tensor term
is equivalent to a penalty matrix under the correct conditions
"""
from pygam.penalties import derivative
def der1(*args, **kwargs):
kwargs.update({'derivative':1})
return derivative(*args, **kwargs)
# create a 3D tensor where the penalty should be equal to the constraint
term = te(0, 1, 2,
n_splines=[4, 5, 6],
penalties=der1,
lam=1,
constraints='monotonic_inc')
# check all the dimensions
for i in range(3):
P = term._build_marginal_penalties(i).A
C = term._build_marginal_constraints(i,
-np.arange(term.n_coefs),
constraint_lam=1,
constraint_l2=0).A
assert (P == C).all()
def test_tensor_with_constraints(hepatitis_X_y):
"""we should be able to fit a gam with not 'none' constraints on a tensor term
and observe its effect in reducing the R2 of the fit
"""
X, y = hepatitis_X_y
X = np.c_[X, np.random.randn(len(X))] # add random interaction data
# constrain useless dimension
gam_useless_constraint = LinearGAM(te(0, 1,
constraints=['none', 'monotonic_dec'],
n_splines=[20, 4]))
gam_useless_constraint.fit(X, y)
# constrain informative dimension
gam_constrained = LinearGAM(te(0, 1,
constraints=['monotonic_dec', 'none'],
n_splines=[20, 4]))
gam_constrained.fit(X, y)
assert gam_useless_constraint.statistics_['pseudo_r2']['explained_deviance'] > 0.5
assert gam_constrained.statistics_['pseudo_r2']['explained_deviance'] < 0.1
class TestRegressions(object):
def test_no_auto_dtype(self):
with pytest.raises(ValueError):
SplineTerm(feature=0, dtype='auto')
def test_compose_penalties(self):
"""penalties should be composable, and this is done by adding all
penalties on a single term, NOT multiplying them.
so a term with a derivative penalty and a 'none' penalty should be equivalent
to a term with a derivative penalty.
"""
base_term = SplineTerm(0)
term = SplineTerm(feature=0, penalties=['auto', 'none'])
# penalties should be equivalent
assert (term.build_penalties() == base_term.build_penalties()).A.all()
# multiple penalties should be additive, not multiplicative,
# so 'none' penalty should have no effect
assert np.abs(term.build_penalties().A).sum() > 0
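# Illustration of the additive composition described above (not pyGAM code): with a
# first-difference operator D, the derivative penalty is D.T @ D and the 'none' penalty is an
# all-zero matrix, so D.T @ D + 0 == D.T @ D -- which is exactly why the two assertions above hold.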
def test_compose_constraints(self, hepatitis_X_y):
"""we should be able to compose penalties
here we show that a gam with a monotonic increasing penalty composed with a monotonic decreasing
penalty is equivalent to a gam with only an intercept
"""
X, y = hepatitis_X_y
gam_compose = LinearGAM(s(0, constraints=['monotonic_inc', 'monotonic_dec'])).fit(X, y)
gam_intercept = LinearGAM(terms=None).fit(X, y)
assert np.allclose(gam_compose.coef_[-1], gam_intercept.coef_)
def test_constraints_and_tensor(self, chicago_X_y):
"""a model that has consrtraints and tensor terms should not fail to build
because of inability of tensor terms to build a 'none' constraint
"""
X, y = chicago_X_y
gam = PoissonGAM(s(0, constraints='monotonic_inc') + te(3, 1) + s(2)).fit(X, y)
assert gam._is_fitted
|
test/SIM_alloc_test/RUN_test/input.py | gilbertguoze/trick | 647 | 12736576 |
# Creates a local class. The class is destructed/deleted when the function returns.
def create_local_alloc_test():
test = trick.AllocTest()
# Creates a class that is controlled by the Memory Manager (MM). It is not freed when the function returns.
# TMM_declare_var returns a void *. We can cast it to the correct type so we can access the class fields
# in python. This allocation will not have a name in the memory manager.
def create_tmm_alloc_test():
return trick.castAsAllocTest(trick.TMM_declare_var_s("AllocTest"))
# Creates a class that is controlled by the Memory Manager (MM). It is not freed when the function returns.
# This allocation will have a name in the memory manager.
def create_new_alloc_test():
# test is known to the memory manager, it must be deleted with TMM_delete_var
return trick.AllocTest(TMMName="my_alloc_test")
# Creates a local class. The class is destructed/deleted when the function returns.
# Underneath this class declares itself to the memory manager and is controlled by the MM. The class destructor
# automatically removes the allocation from the memory manager.
def create_local_alloc_test_with_mm_interface():
test = trick.AllocTestWithMMInterface()
# Creates a class that is controlled by the Memory Manager (MM). It is not freed when the function returns.
# TMM_declare_var returns a void *. We can cast it to the correct type so we can access the class fields
# in python. This allocation will not have a name in the memory manager.
def create_tmm_alloc_test_with_mm_interface():
return trick.castAsAllocTestWithMMInterface(trick.TMM_declare_var_s("AllocTestWithMMInterface"))
# Creates a class that is controlled by the Memory Manager (MM). It is not freed when the function returns.
# This allocation will have a name in the memory manager.
def create_new_alloc_test_with_mm_interface():
return trick.AllocTestWithMMInterface(TMMName="my_alloc_test_with_mm_interface")
def main():
print "\ncreate local python tracked allocation"
create_local_alloc_test()
print "\ncreate through TMM_declare_var"
test = create_tmm_alloc_test()
trick.TMM_delete_var_a(test)
print "\ncreate with TMMName"
test = create_new_alloc_test()
trick.TMM_delete_var_a(test)
print "\ncreate local python tracked allocation. Class uses TRICK_MM_INTERFACE."
create_local_alloc_test_with_mm_interface()
print "\ncreate through TMM_declare_var"
test = create_tmm_alloc_test_with_mm_interface()
trick.TMM_delete_var_a(test)
print "\ncreate through TMMName with TRICK_MM_INTERFACE"
test = create_new_alloc_test_with_mm_interface()
trick.TMM_delete_var_n("my_alloc_test_with_mm_interface")
trick.stop(1.0)
if __name__ == "__main__":
main()
|
geoviews/models/__init__.py | pmav99/geoviews | 172 | 12736593 |
from .custom_tools import ( # noqa
CheckpointTool, ClearTool, PolyVertexDrawTool, PolyVertexEditTool,
RestoreTool
)
|
util/heap/__init__.py | EarthCompass/patchkit | 631 | 12736692 |
import os
import functools
from util import read
"""
Replace a custom heap with dlmalloc
Usage:
from util import heap
heap.declare(pt.linker)
pt.patch(addr, sym='dlmalloc')
pt.patch(addr, sym='dlcalloc')
pt.patch(addr, sym='dlfree')
pt.patch(addr, sym='dlrealloc')
"""
__all__ = ["apply"]
dlmalloc = {'symbols': {
'dlmalloc': 'void *dlmalloc(size_t size)',
'dlfree': 'void dlfree(void *addr)',
'dlcalloc': 'void *dlcalloc(size_t count, size_t size)',
'dlrealloc': 'void *dlrealloc(void *addr, size_t size)',
}, 'source': read('heap/malloc.c')}
def declare(linker):
if not 'dlmalloc' in linker:
linker.declare(**dlmalloc)
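# Minimal usage sketch, mirroring the module docstring above (the patcher object `pt` and the
# address are hypothetical, not part of this module):
# heap.declare(pt.linker)               # registers the dlmalloc symbols/source once
# pt.patch(0x8048abc, sym='dlmalloc')   # redirect a custom allocator entry point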
|
dizoo/gfootball/model/bots/__init__.py | sailxjx/DI-engine | 464 | 12736696 | from .kaggle_5th_place_model import FootballKaggle5thPlaceModel
from .rule_based_bot import FootballRuleBaseModel |
src/hammer-vlsi/par/nop.py | XiaoSanchez/hammer | 138 | 12736714 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# nop.py
# No-op place and route tool.
#
# See LICENSE for licence details.
from hammer_vlsi import HammerPlaceAndRouteTool, DummyHammerTool
from typing import List, Optional
from decimal import Decimal
class NopPlaceAndRoute(HammerPlaceAndRouteTool, DummyHammerTool):
def fill_outputs(self) -> bool:
self.output_ilms = []
self.output_gds = "/dev/null"
self.output_netlist = "/dev/null"
self.output_sim_netlist = "/dev/null"
self.hcells_list = []
return True
def specify_power_straps(self, layer_name: str, bottom_via_layer_name: str, blockage_spacing: Decimal, pitch: Decimal, width: Decimal, spacing: Decimal, offset: Decimal, bbox: Optional[List[Decimal]], nets: List[str], add_pins: bool) -> List[str]:
return []
def specify_std_cell_power_straps(self, blockage_spacing: Decimal, bbox: Optional[List[Decimal]], nets: List[str]) -> List[str]:
return []
tool = NopPlaceAndRoute
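# Note: hammer appears to pick up the plugin through this module-level `tool` attribute; the class
# itself only reports dummy/empty outputs so a flow can run without performing real place and route.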
|
utils/data_gen.py | ShenLeixian/data2vis | 103 | 12736725 |
import data_utils
import json
# Generate training data splits.
# input source_directory - path to directory containing vegalite examples
# data_split_params - train/text/dev data split configuration
# output_directory - path to directory containing generated train/dev/test source files and vocabularies
source_directory = "examples"
data_split_params = [{"tag": "train","percentage":[0,0.8]},{"tag": "dev","percentage":[0.8,0.9]},{"tag": "test","percentage":[0.9,1]}]
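# Worked example of the split above (assuming generate_data_pairs applies the [start, end)
# fractions in order): with 1000 examples, train gets indices [0, 800), dev gets [800, 900)
# and test gets [900, 1000).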
output_directory = "sourcedata"
data_utils.generate_data_pairs(source_directory,output_directory, data_split_params) |
test/restful/test_rooms.py | thenetcircle/dino | 150 | 12736749 | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from datetime import datetime
from datetime import timedelta
from dino import environ
from dino.config import ConfigKeys
from dino.rest.resources.banned import BannedResource
__author__ = '<NAME> <<EMAIL>>'
from dino.rest.resources.rooms import RoomsResource
class FakeDb(object):
def set_all_rooms(self):
pass
def get_all_rooms(self):
return [
{
'id': '1',
'status': 'private',
'name': 'foo',
'channel': 'foo channel'
},
{
'id': '2',
'status': 'public',
'name': 'bar',
'channel': 'bar channel'
},
]
class RoomsTest(TestCase):
def setUp(self):
environ.env.db = FakeDb()
self.resource = RoomsResource()
def test_get(self):
self.assertEqual(2, len(self.resource.do_get()))
def test_set_last_cleared(self):
last_cleared = self.resource._get_last_cleared()
self.resource._set_last_cleared(datetime.utcnow()+timedelta(minutes=5))
self.assertNotEqual(last_cleared, self.resource._get_last_cleared())
|
test.py | Truth0906/PTTLibrary | 260 | 12736753 | import sys
import os
import time
import json
import random
import traceback
import threading
from PyPtt import PTT
def get_password(password_file):
try:
with open(password_file) as AccountFile:
account = json.load(AccountFile)
ptt_id = account['id']
password = account['pw']
except FileNotFoundError:
print(f'Please write PTT ID and Password in {password_file}')
print('{"id":"your ptt id", "pw":"your ptt pw"}')
sys.exit()
return ptt_id, password
def init():
print('===正向===')
print('===預設值===')
PTT.API()
print('===中文顯示===')
PTT.API(language=PTT.i18n.language.CHINESE)
print('===英文顯示===')
PTT.API(language=PTT.i18n.language.ENGLISH)
print('===log DEBUG===')
PTT.API(log_level=PTT.log.level.DEBUG)
print('===log INFO===')
PTT.API(log_level=PTT.log.level.INFO)
print('===log SLIENT===')
PTT.API(log_level=PTT.log.level.SILENT)
print('===log SLIENT======')
print('===負向===')
try:
print('===語言 99===')
PTT.API(language=99)
except ValueError:
print('通過')
except:
print('沒通過')
sys.exit(-1)
print('===語言放字串===')
try:
PTT.API(language='PTT.i18n.language.ENGLISH')
except TypeError:
print('通過')
except:
print('沒通過')
sys.exit(-1)
def handler(msg):
with open('log.txt', 'a', encoding='utf-8') as f:
f.write(msg + '\n')
ptt_bot = PTT.API(
log_handler=handler)
ptt_bot.log('Test log')
def performance_test():
test_time = 2000
print(f'效能測試 get_time {test_time} 次')
start_time = time.time()
for _ in range(test_time):
ptt_time = ptt_bot.get_time()
if ptt_time is None:
print('PTT_TIME is None')
break
# print(ptt_time)
end_time = time.time()
print('Performance Test get_time ' + str(
round(end_time - start_time, 2)) + ' s')
start_time = time.time()
for _ in range(test_time):
ptt_time = ptt_bot.fast_get_time()
if ptt_time is None:
print('PTT_TIME is None')
break
# print(ptt_time)
end_time = time.time()
print('Performance Test fast_get_time ' + str(
round(end_time - start_time, 2)) + ' s')
ptt_bot.logout()
print('Performance Test finish')
sys.exit()
# for _ in range(1000):
# ptt_time = ptt_bot.fast_get_time()
# if len(ptt_time) != 5:
# print('error!', ptt_time)
# break
# # print(ptt_time)
def get_post():
def show(name, value):
if value is not None:
print(f'{name} [{value}]')
else:
print(f'無{name}')
if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
test_post_list = [
('Python', 1),
('NotExitBoard', 1),
('Python', '1TJH_XY0'),
# malformed post format
('Steam', 4444),
('Stock', 92324),
('Stock', '1TVnEivO'),
# malformed post format
('movie', 457),
('Gossiping', '1UDnXefr'),
('joke', '1Tc6G9eQ'),
# 135193
('Test', 575),
# unverified post
('Test', '1U3pLzi0'),
# very old post
('LAW', 1),
# deleted post
('Test', 347),
# push number parse error
('Ptt25sign', '1VppdKLW'),
]
else:
test_post_list = [
# PTT2
('PttSuggest', 1),
('PttSuggest', '0z7TVw00'),
# malformed post format
# problem with the '發信站:' (origin site) line
('PttSuggest', '1EbQObff'),
# the beginning of the post is missing/jumps, so there is no ending (already fixed)
('WhoAmI', '1Tc0ooap'),
# Test
# malformed post format
# arbitrarily modified
('Test', '1Sp1W7Fi'),
('Test', '1TXRkuDW'),
('WhoAmI', '1TqJhzQH')
]
query = False
for (board, index) in test_post_list:
try:
print('看板', board, index)
if isinstance(index, int):
post_info = ptt_bot.get_post(
board,
post_index=index,
# SearchType=PTT.data_type.post_search_type.KEYWORD,
# SearchCondition='公告',
query=query)
else:
post_info = ptt_bot.get_post(
board,
post_aid=index,
# SearchType=PTT.data_type.post_search_type.KEYWORD,
# SearchCondition='公告',
query=query)
if post_info is None:
print('Empty')
continue
if not post_info.pass_format_check:
print('文章格式錯誤')
continue
if post_info.is_lock:
print('鎖文狀態')
continue
if post_info.delete_status != PTT.data_type.post_delete_status.NOT_DELETED:
print('文章已經被刪除')
continue
# show('Origin Post\n', post.origin_post)
if not query:
print('Origin Post\n' + post_info.origin_post)
print('=' * 30 + ' Origin Post Finish')
show('Board', post_info.board)
show('AID', post_info.aid)
show('push num', post_info.push_number)
show('index', post_info.index)
show('Author', post_info.author)
show('push_number', post_info.push_number)
show('List Date', post_info.list_date)
show('Title', post_info.title)
show('Money', post_info.money)
show('URL', post_info.web_url)
if post_info.is_unconfirmed:
print('待證實文章')
if not query:
show('Date', post_info.date)
show('Content', post_info.content)
show('IP', post_info.ip)
show('Location', post_info.location)
# the date shown in the post list
push_count = 0
boo_count = 0
arrow_count = 0
for push_obj in post_info.push_list:
# print(Push.getType())
# print(Push.getAuthor())
# print(Push.getContent())
# print(Push.getIP())
# print(Push.time)
if push_obj.type == PTT.data_type.push_type.PUSH:
push_count += 1
push_type = '推'
if push_obj.type == PTT.data_type.push_type.BOO:
boo_count += 1
push_type = '噓'
if push_obj.type == PTT.data_type.push_type.ARROW:
arrow_count += 1
push_type = '→'
author = push_obj.author
content = push_obj.content
# Buffer = f'[{Author}] 給了一個{Type} 說 [{Content}]'
# if Push.getIP() is not None:
# Buffer += f' 來自 [{Push.getIP()}]'
# Buffer += f' 時間是 [{Push.time}]'
if push_obj.ip is not None:
buffer = f'{push_type} {author}: {content} {push_obj.ip} {push_obj.time}'
else:
buffer = f'{push_type} {author}: {content} {push_obj.time}'
print(buffer)
# print(post_info.origin_post)
print(
f'Total {push_count} Pushs {boo_count} Boo {arrow_count} Arrow = {push_count - boo_count}')
except Exception as e:
traceback.print_tb(e.__traceback__)
print(e)
def get_aid_from_url():
# test_url = [
# 'https://www.ptt.cc/bbs/NDHU-His_WV/M.1072146614.A.D59.html',
# 'https://www.ptt.cc/bbs/NDMC-M99c/M.1084922723.A.html',
# ]
#
# for url in test_url:
# board, aid = ptt_bot.get_aid_from_url(url)
# print(board, aid)
#
# return
bug_board = [
'ck55th316'
]
def random_board_test():
board_list = ptt_bot.get_board_list()
board_list = [x for x in board_list if x not in bug_board]
test_range = 5000
test_board = random.sample(board_list, test_range)
for test_board in test_board:
print(test_board)
newest_index = ptt_bot.get_newest_index(PTT.data_type.index_type.BBS, board=test_board)
print(f'newest_index {newest_index}')
if newest_index == 0:
continue
while True:
current_index = random.randrange(1, newest_index + 1)
print(current_index)
post_info = ptt_bot.get_post(test_board, post_index=current_index, query=True)
if post_info.delete_status != PTT.data_type.post_delete_status.NOT_DELETED:
continue
if post_info.web_url is None:
print(f'error url is None {test_board} {current_index}')
break
if post_info.aid is None:
print(f'error aid is None {test_board} {current_index}')
continue
convert_board, convert_aid = ptt_bot.get_aid_from_url(post_info.web_url)
if convert_board != test_board:
print('board not match')
print(f'post_info {test_board}')
print(f'convert {convert_board}')
raise ValueError()
if convert_aid != post_info.aid:
print('aid not match')
print(f'post_info {post_info.aid}')
print(f'convert {convert_aid}')
raise ValueError()
break
print('===================================')
def random_post_test():
test_board = 'Gossiping'
newest_index = ptt_bot.get_newest_index(PTT.data_type.index_type.BBS, board=test_board)
print(f'{test_board} newest_index {newest_index}')
test_range = 5000
start_index = random.randrange(1, newest_index + 1 - test_range)
print(start_index)
for current_index in range(start_index, start_index + test_range):
print(current_index)
post_info = ptt_bot.get_post(test_board, post_index=current_index, query=True)
if post_info.delete_status != PTT.data_type.post_delete_status.NOT_DELETED:
continue
if post_info.web_url is None:
print(f'error url is None {test_board} {current_index}')
break
if post_info.aid is None:
print(f'error aid is None {test_board} {current_index}')
continue
convert_board, convert_aid = ptt_bot.get_aid_from_url(post_info.web_url)
if convert_board != test_board:
print('board not match')
print(f'post_info {test_board}')
print(f'convert {convert_board}')
raise ValueError()
if convert_aid != post_info.aid:
print('aid not match')
print(f'post_info {post_info.aid}')
print(f'convert {convert_aid}')
raise ValueError()
random_post_test()
test_list = {
('Wanted', PTT.data_type.post_search_type.KEYWORD, '[公告]'),
('Wanted', PTT.data_type.post_search_type.AUTHOR, 'gogin'),
('Wanted', PTT.data_type.post_search_type.PUSH, '10'),
('Wanted', PTT.data_type.post_search_type.MARK, 'm'),
('Wanted', PTT.data_type.post_search_type.MONEY, '5'),
('Gossiping', PTT.data_type.post_search_type.KEYWORD, '[公告]'),
('Gossiping', PTT.data_type.post_search_type.AUTHOR, 'ReDmango'),
('Gossiping', PTT.data_type.post_search_type.PUSH, '10'),
('Gossiping', PTT.data_type.post_search_type.MARK, 'm'),
('Gossiping', PTT.data_type.post_search_type.MONEY, '5'),
('Gossiping', PTT.data_type.post_search_type.PUSH, '-100'),
('Gossiping', PTT.data_type.post_search_type.PUSH, '150'),
}
def show_condition(test_board, search_type, condition):
if search_type == PTT.data_type.post_search_type.KEYWORD:
type_str = '關鍵字'
if search_type == PTT.data_type.post_search_type.AUTHOR:
type_str = '作者'
if search_type == PTT.data_type.post_search_type.PUSH:
type_str = '推文數'
if search_type == PTT.data_type.post_search_type.MARK:
type_str = '標記'
if search_type == PTT.data_type.post_search_type.MONEY:
type_str = '稿酬'
print(f'{test_board} 使用 {type_str} 搜尋 {condition}')
def get_post_with_condition():
# PTT1
if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
test_list = [
('Python', PTT.data_type.post_search_type.KEYWORD, '[公告]'),
('ALLPOST', PTT.data_type.post_search_type.KEYWORD, '(Wanted)'),
('Wanted', PTT.data_type.post_search_type.KEYWORD, '(本文已被刪除)'),
('ALLPOST', PTT.data_type.post_search_type.KEYWORD, '(Gossiping)'),
('Gossiping', PTT.data_type.post_search_type.KEYWORD, '普悠瑪'),
]
else:
test_list = [
('PttSuggest', PTT.data_type.post_search_type.KEYWORD, '[問題]'),
('PttSuggest', PTT.data_type.post_search_type.PUSH, '10'),
]
test_range = 1
query = False
for (board, search_type, condition) in test_list:
show_condition(board, search_type, condition)
index = ptt_bot.get_newest_index(
PTT.data_type.index_type.BBS,
board,
search_type=search_type,
search_condition=condition)
print(f'{board} 最新文章編號 {index}')
for i in range(test_range):
post = ptt_bot.get_post(
board,
post_index=index - i,
# PostIndex=611,
search_type=search_type,
search_condition=condition,
query=query)
print('列表日期:')
print(post.list_date)
print('作者:')
print(post.author)
print('標題:')
print(post.title)
if post.delete_status == PTT.data_type.post_delete_status.NOT_DELETED:
if not query:
print('內文:')
print(post.content)
elif post.delete_status == PTT.data_type.post_delete_status.AUTHOR:
print('文章被作者刪除')
elif post.delete_status == PTT.data_type.post_delete_status.MODERATOR:
print('文章被版主刪除')
print('=' * 50)
# TestList = [
# ('Python', PTT.data_type.post_search_type.KEYWORD, '[公告]')
# ]
# for (Board, SearchType, Condition) in TestList:
# index = PTTBot.getNewestIndex(
# PTT.data_type.index_type.BBS,
# Board,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# print(f'{Board} 最新文章編號 {index}')
# Post = PTTBot.getPost(
# Board,
# PostIndex=index,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# print('標題: ' + Post.getTitle())
# print('=' * 50)
search_list = [
(PTT.data_type.post_search_type.KEYWORD, '新聞'),
(PTT.data_type.post_search_type.AUTHOR, 'Code'),
]
index = ptt_bot.get_newest_index(
PTT.data_type.index_type.BBS,
'Gossiping',
search_type=PTT.data_type.post_search_type.KEYWORD,
search_condition='新聞',
search_list=search_list)
print(f'Gossiping 最新文章編號 {index}')
for current_index in range(1, index + 1):
post_info = ptt_bot.get_post(
'Gossiping',
post_index=current_index,
search_type=PTT.data_type.post_search_type.KEYWORD,
search_condition='新聞',
search_list=search_list,
query=True)
print(current_index, post_info.title)
def post():
content = '''
此為 PyPtt 貼文測試內容,如有打擾請告知。
github: https://github.com/PttCodingMan/PyPtt
開發手冊: https://github.com/PttCodingMan/PyPtt/tree/master/doc
ポ
ポポ
ポポポ
☂
☂☂
☂☂☂
'''
content = content.replace('\n', '\r\n')
for _ in range(3):
ptt_bot.post(
# board
'Test',
# title
'PyPtt 程式貼文測試',
# content
content,
# title category
1,
# signature file
0)
def get_newest_index():
if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
test_board_list = [
'Wanted',
'Gossiping',
'Test',
'Stock',
'movie'
]
else:
test_board_list = [
'PttSuggest',
'Test',
'WhoAmI',
'CodingMan'
]
test_range = 100
for board in test_board_list:
for _ in range(test_range):
index = ptt_bot.get_newest_index(PTT.data_type.index_type.BBS, board=board)
print(f'{board} 最新文章編號 {index}')
###############################################
index = ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)
print(f'最新郵件編號 {index}')
index = ptt_bot.get_newest_index(
PTT.data_type.index_type.MAIL,
search_type=PTT.data_type.mail_search_type.KEYWORD,
search_condition='uPtt system')
print(f'最新郵件編號 {index}')
search_list = [
(PTT.data_type.mail_search_type.KEYWORD, 'uPtt'),
(PTT.data_type.mail_search_type.KEYWORD, 'key')
]
index = ptt_bot.get_newest_index(
PTT.data_type.index_type.MAIL,
search_list=search_list)
print(f'最新郵件編號 {index}')
def showValue(Msg, Value):
print(f'{Msg} =>{Value}<=')
def detectNone(Name, Obj, Enable=True):
if Obj is None and Enable:
raise ValueError(Name + ' is None')
query = False
def crawlHandler(Post):
global query
if Post.delete_status != PTT.data_type.post_delete_status.NOT_DELETED:
if Post.delete_status == PTT.data_type.post_delete_status.MODERATOR:
# print(f'[版主刪除][{Post.getAuthor()}]')
pass
elif Post.delete_status == PTT.data_type.post_delete_status.AUTHOR:
# print(f'[作者刪除][{Post.getAuthor()}]')
pass
elif Post.delete_status == PTT.data_type.post_delete_status.UNKNOWN:
# print(f'[不明刪除]')
pass
return
# if Post.getTitle().startswith('Fw:') or Post.getTitle().startswith('轉'):
# print(f'[{Post.aid}][{Post.getAuthor()}][{Post.getTitle()}]')
# print(f'[{Post.getContent()}]')
# print(f'[{Post.getAuthor()}][{Post.getTitle()}]')
PushNumber = Post.push_number
if PushNumber is not None:
if PushNumber == '爆':
pass
elif PushNumber.startswith('X'):
N = PushNumber[1:]
else:
pass
# if not PushNumber.isdigit():
# print(f'[{Post.aid}][{Post.push_number}]')
# print(f'[{Post.aid}][{Post.push_number}]')
# print(f'[{Post.aid}][{Post.push_number}]')
# raise ValueError()
# print(f'[{Post.aid}][{Post.getPushNumber()}]')
detectNone('標題', Post.title)
# detectNone('AID', Post.aid)
detectNone('Author', Post.author)
# detectNone('Money', Post.getMoney())
# detectNone('WebUrl', Post.web_url)
# detectNone('ListDate', Post.getListDate())
# if not Query:
# detectNone('Date', Post.getDate())
# detectNone('Content', Post.getContent())
# detectNone('IP', Post.getIP())
# time.sleep(0.2)
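# Note: crawl_board below is expected to call this handler once per crawled post; deleted posts
# are skipped here and only light sanity checks (detectNone) run on the remaining fields.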
def crawl_board():
global query
if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
test_board_list = [
'Test',
'Wanted',
'Gossiping',
'Stock',
'movie',
'C_Chat',
'Baseball',
'NBA',
'HatePolitics',
]
else:
test_board_list = [
'Test',
'WhoAmI',
'PttSuggest'
]
# crawl_type = PTT.data_type.index_type.WEB
crawl_type = PTT.data_type.index_type.BBS
index_type = 'Index'
test_range = 100
test_round = 2
for _ in range(test_round):
for TestBoard in test_board_list:
if crawl_type == PTT.data_type.index_type.BBS:
if index_type == 'Index':
newest_index = ptt_bot.get_newest_index(
PTT.data_type.index_type.BBS,
board=TestBoard)
start_index = newest_index - test_range + 1
print(
f'預備爬行 {TestBoard} 編號 {start_index} ~ {newest_index} 文章')
print(f'TestBoard [{TestBoard}]')
error_post_list, del_post_list = ptt_bot.crawl_board(
PTT.data_type.crawl_type.BBS,
crawlHandler,
TestBoard,
start_index=start_index,
end_index=newest_index,
query=query)
elif index_type == 'AID':
start_aid = '1TnDKzxw'
end_aid = '1TnCPFGu'
error_post_list, del_post_list = ptt_bot.crawl_board(
PTT.data_type.crawl_type.BBS,
crawlHandler,
TestBoard,
start_aid=start_aid,
end_aid=end_aid)
if len(error_post_list) > 0:
print('格式錯誤文章: \n' + '\n'.join(str(x)
for x in error_post_list))
else:
print('沒有偵測到格式錯誤文章')
if len(del_post_list) > 0:
print(f'共有 {len(del_post_list)} 篇文章被刪除')
elif crawl_type == PTT.data_type.index_type.WEB:
newest_index = ptt_bot.get_newest_index(
PTT.data_type.index_type.WEB,
board=TestBoard)
end_page = newest_index
start_page = end_page - test_range + 1
print(f'預備爬行 {TestBoard} 最新頁數 {newest_index}')
print(f'預備爬行 {TestBoard} 編號 {start_page} ~ {end_page} 文章')
error_post_list, del_post_list = ptt_bot.crawl_board(
PTT.data_type.crawl_type.WEB,
crawlHandler,
TestBoard,
start_page=start_page,
end_page=end_page)
if len(del_post_list) > 0:
print('\n'.join(del_post_list))
print(f'共有 {len(del_post_list)} 篇文章被刪除')
def crawl_board_with_condition():
# TestRange = 10
# for (Board, SearchType, Condition) in TestList:
# try:
# showCondition(Board, SearchType, Condition)
# NewestIndex = PTTBot.getNewestIndex(
# PTT.data_type.index_type.BBS,
# Board,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# print(f'{Board} 最新文章編號 {NewestIndex}')
# StartIndex = NewestIndex - TestRange + 1
# ErrorPostList, DelPostList = PTTBot.crawlBoard(
# crawlHandler,
# Board,
# StartIndex=StartIndex,
# EndIndex=NewestIndex,
# SearchType=SearchType,
# SearchCondition=Condition,
# )
# # print('標題: ' + Post.getTitle())
# print('=' * 50)
# except Exception as e:
# traceback.print_tb(e.__traceback__)
# print(e)
# if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
# test_list = [
# # ptt1
# ('Stock', PTT.data_type.post_search_type.KEYWORD, '盤中閒聊'),
# ('Baseball', PTT.data_type.post_search_type.PUSH, '20')
# ]
# else:
# test_list = [
# ('WhoAmI', PTT.data_type.post_search_type.KEYWORD, '[閒聊]'),
# ('WhoAmI', PTT.data_type.post_search_type.PUSH, '10')
# ]
#
# test_range = 100
#
# for (board, search_type, search_condition) in test_list:
# show_condition(board, search_type, search_condition)
# newest_index = ptt_bot.get_newest_index(
# PTT.data_type.index_type.BBS,
# board,
# search_type=search_type,
# search_condition=search_condition)
# print(f'{board} 最新文章編號 {newest_index}')
#
# start_index = newest_index - test_range + 1
#
# error_post_list, del_post_list = ptt_bot.crawl_board(
# PTT.data_type.crawl_type.BBS,
# crawlHandler,
# board,
# start_index=start_index,
# end_index=newest_index,
# search_type=search_type,
# search_condition=search_condition,
# )
# print('=' * 50)
search_list = [
(PTT.data_type.post_search_type.KEYWORD, '新聞'),
(PTT.data_type.post_search_type.AUTHOR, 'Code'),
]
newest_index = ptt_bot.get_newest_index(
PTT.data_type.index_type.BBS,
'Gossiping',
search_list=search_list)
print(f'Gossiping 最新文章編號 {newest_index}')
error_post_list, del_post_list = ptt_bot.crawl_board(
PTT.data_type.crawl_type.BBS,
crawlHandler,
'Gossiping',
start_index=1,
end_index=newest_index,
search_list=search_list)
def get_user():
test_user = [
# nickname contains special characters
'for40255',
'CodingMan'
]
test_user = ptt_bot.search_user('c', max_page=1)
test_user = test_user[:10]
print(f'共有 {len(test_user)} 使用者')
for user in test_user:
try:
ptt_bot.log(user)
user = ptt_bot.get_user(user)
if user is None:
return
ptt_bot.log('使用者ID: ' + user.id)
ptt_bot.log('使用者經濟狀況: ' + str(user.money))
ptt_bot.log('登入次數: ' + str(user.login_time))
ptt_bot.log('帳戶通過認證: ' + str(user.account_verified))
ptt_bot.log('有效文章數: ' + str(user.legal_post))
ptt_bot.log('退文文章數: ' + str(user.illegal_post))
ptt_bot.log('目前動態: ' + user.status)
ptt_bot.log('信箱狀態: ' + user.mail_status)
ptt_bot.log('最後登入時間: ' + user.last_login)
ptt_bot.log('上次故鄉: ' + user.last_ip)
ptt_bot.log('五子棋戰績: ' + user.five_chess)
ptt_bot.log('象棋戰績:' + user.chess)
ptt_bot.log('簽名檔:' + user.signature_file)
ptt_bot.log('=====================')
except PTT.exceptions.NoSuchUser:
print('無此使用者')
try:
user = ptt_bot.get_user('sdjfklsdj')
except PTT.exceptions.NoSuchUser:
print('無此使用者')
def push():
test_post_list = [
# ('Gossiping', 95692),
# ('Test', 'QQQQQQ'),
('Test', 383),
# ('Wanted', '1Teyovc3')
]
# long push content (sent in segments)
content = '批踢踢實業坊,簡稱批踢踢、PTT,是一個臺灣電子布告欄(BBS),採用Telnet BBS技術運作,建立在台灣學術網路的資源之上,以學術性質為原始目的,提供線上言論空間。目前由國立臺灣大學電子布告欄系統研究社管理,大部份的系統原始碼由國立臺灣大學資訊工程學系的學生與校友進行維護,並且邀請法律專業人士擔任法律顧問。它有兩個分站,分別為批踢踢兔與批踢踢參。目前在批踢踢實業坊與批踢踢兔註冊總人數約150萬人,尖峰時段兩站超過15萬名使用者同時上線,擁有超過2萬個不同主題的看板,每日超過2萬篇新文章及50萬則推文被發表,是台灣使用人次最多的網路論壇之一。'
# short push content
# content = '安安'
# consecutive duplicate pushes
# content = '''安安
# 安安
# 安安
# 安安
# 安安
# '''
testround: int = 3
for (board, index) in test_post_list:
for i in range(testround):
if isinstance(index, int):
ptt_bot.push(board, PTT.data_type.push_type.PUSH, content, post_index=index)
else:
ptt_bot.push(board, PTT.data_type.push_type.PUSH, content, post_aid=index)
# Index = PTTBot.getNewestIndex(
# PTT.data_type.index_type.BBS,
# Board='Test'
# )
# PTTBot.push('Test', PTT.data_type.push_type.PUSH, Content, PostIndex=Index + 1)
def throw_waterball():
ptt_id = 'DeepLearning'
# TestWaterBall = [str(x) + '_' * 35 + ' 水球測試結尾' for x in range(30)]
# # TestWaterBall = TestWaterBall * 3
# TestWaterBall = '\n'.join(TestWaterBall)
test_waterball = '水球測試1 :D\n水球測試2 :D'
ptt_bot.throw_waterball(ptt_id, test_waterball)
# time.sleep(3)
def get_waterball():
# operate_type = PTT.data_type.waterball_operate_type.NOTHING
# OperateType = PTT.data_type.waterball_operate_type.MAIL
operate_type = PTT.data_type.waterball_operate_type.CLEAR
while True:
newest_index = ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)
waterball_list = ptt_bot.get_waterball(operate_type)
if waterball_list is None:
return
# print('Result:')
for waterball in waterball_list:
if waterball.type == PTT.data_type.waterball_type.CATCH:
temp = '★' + waterball.target + ' '
elif waterball.type == PTT.data_type.waterball_type.SEND:
temp = 'To ' + waterball.target + ': '
temp += waterball.content + ' [' + waterball.date + ']'
print(temp)
time.sleep(0.5)
def call_status():
def show_call_status(call_status):
if call_status == PTT.data_type.call_status.ON:
print('呼叫器狀態[打開]')
elif call_status == PTT.data_type.call_status.OFF:
print('呼叫器狀態[關閉]')
elif call_status == PTT.data_type.call_status.UNPLUG:
print('呼叫器狀態[拔掉]')
elif call_status == PTT.data_type.call_status.WATERPROOF:
print('呼叫器狀態[防水]')
elif call_status == PTT.data_type.call_status.FRIEND:
print('呼叫器狀態[朋友]')
else:
print(f'Unknow call_status: {call_status}')
for _ in range(5):
current_call_status = ptt_bot.get_call_status()
show_call_status(current_call_status)
print('連續測試通過')
init_call_status = random.randint(
PTT.data_type.call_status.min_value, PTT.data_type.call_status.max_value
)
test_queue = [x for x in range(
PTT.data_type.call_status.min_value, PTT.data_type.call_status.max_value + 1
)]
random.shuffle(test_queue)
print('初始呼叫器狀態')
show_call_status(init_call_status)
print('測試切換呼叫器狀態順序')
for CurrentTeststatus in test_queue:
show_call_status(CurrentTeststatus)
ptt_bot.set_call_status(init_call_status)
current_call_status = ptt_bot.get_call_status()
if current_call_status != init_call_status:
print('設定初始呼叫器狀態: 不通過')
return
print('設定初始呼叫器狀態: 通過')
for CurrentTeststatus in test_queue:
print('準備設定呼叫器狀態')
show_call_status(CurrentTeststatus)
ptt_bot.set_call_status(CurrentTeststatus)
current_call_status = ptt_bot.get_call_status()
show_call_status(current_call_status)
if current_call_status != CurrentTeststatus:
print('設定呼叫器狀態: 不通過')
return
print('設定呼叫器狀態: 通過')
print('呼叫器測試全數通過')
def give_money():
ptt_bot.give_money('DeepLearning', 1)
ptt_bot.give_money('DeepLearning', 1, title='紅包袋標題')
ptt_bot.give_money('DeepLearning', 1, title='紅包袋標題', content='紅包袋內文')
ptt_bot.give_money('DeepLearning', 1, content='紅包袋內文')
def mail():
content = '\r\n\r\n'.join(
[
'如有誤寄,對..對不起',
'PyPtt 程式寄信測試內容',
'github: https://tinyurl.com/umqff3v'
]
)
try:
ptt_bot.mail(
'sdjfkdsjfls',
'程式寄信標題',
content,
0)
except PTT.exceptions.NoSuchUser:
pass
ptt_bot.mail(
ptt_id,
'程式寄信標題',
content,
0,
False)
newest_index = ptt_bot.get_newest_index(
PTT.data_type.index_type.MAIL)
print(f'最新郵件編號 {newest_index}')
# ptt_bot.del_mail(newest_index)
def has_new_mail():
result = ptt_bot.has_new_mail()
ptt_bot.log(f'{result} 封新信')
result = ptt_bot.has_new_mail()
ptt_bot.log(f'{result} 封新信')
ThreadBot = None
def threading_test():
id1, password1 = get_password('<PASSWORD>')
id2, password2 = get_password('<PASSWORD>')
def thread_func1():
thread_bot1 = PTT.API()
try:
thread_bot1.login(
id1,
password1,
# kick_other_login=True
)
except PTT.exceptions.LoginError:
thread_bot1.log('登入失敗')
return
thread_bot1.logout()
print('1 多線程測試完成')
def thread_func2():
thread_bot2 = PTT.API()
try:
thread_bot2.login(
id2,
password2,
# kick_other_login=True
)
except PTT.exceptions.LoginError:
thread_bot2.log('登入失敗')
return
thread_bot2.logout()
print('2 多線程測試完成')
t1 = threading.Thread(
target=thread_func1
)
t2 = threading.Thread(
target=thread_func2
)
t1.start()
t2.start()
t1.join()
t2.join()
# ThreadBot.log('Hi')
sys.exit()
def get_board_list():
board_list = ptt_bot.get_board_list()
# print(' '.join(BoardList))
print(f'總共有 {len(board_list)} 個板名')
print(f'總共有 {len(set(board_list))} 個不重複板名')
def reply_post():
reply_post_index = 383
ptt_bot.reply_post(
PTT.data_type.reply_type.BOARD,
'Test',
'測試回應到板上,如有打擾抱歉',
post_index=reply_post_index)
ptt_bot.reply_post(
PTT.data_type.reply_type.MAIL,
'Test',
'測試回應到信箱,如有打擾抱歉',
post_index=reply_post_index)
ptt_bot.reply_post(
PTT.data_type.reply_type.BOARD_MAIL,
'Test',
'測試回應到板上還有信箱,如有打擾抱歉',
post_index=reply_post_index)
def set_board_title():
from time import strftime
test_board = 'QQboard'
while True:
time_format = strftime('%H:%M:%S')
try:
ptt_bot.set_board_title(
test_board,
f'現在時間 {time_format}'
)
except PTT.exceptions.ConnectionClosed:
while True:
try:
ptt_bot.login(
ptt_id,
password
)
break
except PTT.exceptions.LoginError:
ptt_bot.log('登入失敗')
time.sleep(1)
except PTT.exceptions.ConnectError:
ptt_bot.log('登入失敗')
time.sleep(1)
print('已經更新時間 ' + time_format, end='\r')
try:
time.sleep(1)
except KeyboardInterrupt:
print('已經更新時間 ' + time_format)
ptt_bot.set_board_title(
test_board,
f'[{test_board}]'
)
print('板標已經恢復')
break
def mark_post():
board = 'CodingMan'
mark_type = PTT.data_type.mark_type.S
ptt_bot.mark_post(
mark_type,
board,
post_index=850
)
ptt_bot.mark_post(
mark_type,
board,
post_index=851
)
# if mark_type == PTT.data_type.mark_type.D:
# ptt_bot.mark_post(
# PTT.data_type.mark_type.DeleteD,
# 'CodingMan'
# )
# ptt_bot.mark_post(
# mark_type,
# 'QQBoard',
# post_index=2000
# )
# PTTBot.mark_post(
# mark_type,
# 'CodingMan',
# post_index=2000
# )
def get_favourite_board():
favourite_board_list = ptt_bot.get_favourite_board()
for board in favourite_board_list:
buff = f'[{board.board}][{board.type}][{board.title}]'
print(buff)
def get_board_info():
# 《Gossiping》看板設定
# b - 中文敘述: 綜合 ◎【八卦】沒有開放政問 珍惜帳號
# 板主名單: arsonlolita/xianyao/Bignana/XXXXGAY
# h - 公開狀態(是否隱形): 公開
# g - 隱板時 可以 進入十大排行榜
# e - 開放 非看板會員發文
# y - 開放 回應文章
# d - 開放 自刪文章 發文與推文限制:
# r - 開放 推薦文章 登入次數 700 次以上
# s - 開放 噓文 退文篇數 0 篇以下
# f - 限制 快速連推文章, 最低間隔時間: 5 秒
# i - 推文時 自動 記錄來源 IP 名單編輯與其它: (需板主權限)
# a - 推文時 不用對齊 開頭 w)設定水桶 v)可見會員名單
# k - 板主 可 刪除部份違規文字 m)舉辦投票 o)投票名單
# x - 轉錄文章 會 自動記錄,且 需要 發文權限 c)文章類別 n)發文注意事項
# j - 未 設為冷靜模式 p)進板畫面
# 8 - 禁止 未滿十八歲進入
# board_list = ptt_bot.get_board_list()
# for board in board_list:
# board_info = ptt_bot.get_board_info(board)
#
# if not board_info.is_push_record_ip:
# continue
# if board_info.is_push_aligned:
# continue
#
# print(f'{board} !!!!!!!!!!')
# # break
# return
if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
test_board_list = [
'Python',
'L_LifePlan',
'NDHU-sl103'
]
else:
test_board_list = [
'WhoAmI'
]
get_post_kind = True
for board in test_board_list:
board_info = ptt_bot.get_board_info(board, get_post_kind=get_post_kind)
print('==============')
print('板名: ', board_info.board)
print('線上人數: ', board_info.online_user)
print('中文敘述: ', board_info.chinese_des)
print('板主: ', board_info.moderators)
print('公開狀態(是否隱形): ', board_info.is_open)
print('隱板時是否可進入十大排行榜: ', board_info.is_into_top_ten_when_hide)
print('是否開放非看板會員發文: ', board_info.can_non_board_members_post)
print('是否開放回應文章: ', board_info.can_reply_post)
print('是否開放自刪文章: ', board_info.can_self_del_post)
print('是否開放推薦文章: ', board_info.can_push_post)
print('是否開放噓文: ', board_info.can_boo_post)
print('是否可以快速連推文章: ', board_info.can_fast_push)
print('推文最低間隔時間: ', board_info.min_interval)
print('推文時是否記錄來源 IP: ', board_info.is_push_record_ip)
print('推文時是否對齊開頭: ', board_info.is_push_aligned)
print('板主是否可刪除部份違規文字: ', board_info.can_moderator_del_illegal_content)
print('轉錄文章是否自動記錄,且是否需要發文權限: ',
board_info.is_tran_post_auto_recorded_and_require_post_permissions)
print('是否為冷靜模式: ', board_info.is_cool_mode)
print('是否需要滿十八歲才可進入: ', board_info.is_require18)
print('發文與推文限制登入次數需多少次以上: ', board_info.require_login_time)
print('發文與推文限制退文篇數多少篇以下: ', board_info.require_illegal_post)
if get_post_kind:
print('發文種類:', ' '.join(board_info.post_kind))
def get_bottom_post_list():
test_board_list = [
'Wanted',
'Python',
'Gossiping'
]
print('=' * 50)
for board in test_board_list:
bottom_post_list = ptt_bot.get_bottom_post_list(board)
if len(bottom_post_list) == 0:
print(f'{board} 板無置頂文章')
else:
print(f'{board} 共有 {len(bottom_post_list)} 置頂文章')
for post in bottom_post_list:
print(post.title)
print('=' * 50)
def del_post():
content = '''
此為 PyPtt 貼文測試內容,如有打擾請告知。
github: https://github.com/PttCodingMan/PyPtt
'''
content = content.replace('\n', '\r\n')
for _ in range(3):
ptt_bot.post(
# board
'Test',
# title
'PyPtt 程式貼文測試',
# content
content,
# title category
1,
# signature file
0)
index = ptt_bot.get_newest_index(PTT.data_type.index_type.BBS, 'Test')
for i in range(5):
current_index = index - int(i)
try:
ptt_bot.del_post('Test', post_index=current_index)
            ptt_bot.log(f'Test {current_index} deleted successfully')
        except PTT.exceptions.NoPermission:
            ptt_bot.log(f'Test {current_index} no permission to delete')
        except PTT.exceptions.DeletedPost:
            ptt_bot.log(f'Test {current_index} was already deleted')
        except PTT.exceptions.NoSuchPost:
            ptt_bot.log(f'Test {current_index} no such post')
def bucket():
ptt_bot.bucket(
'QQBoard',
7,
'Bucket Reason',
'CodingMan')
def search_user():
user_list = ptt_bot.search_user(
'abcd',
min_page=1,
max_page=2
)
print(user_list)
print(len(user_list))
# if 'abcd0800' in userlist:
# print('exist')
# else:
# print('Not exist')
def get_mail():
mail_index = ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)
    ptt_bot.log(
        'Newest mail index',
        mail_index)
    for i in reversed(range(1, mail_index + 1)):
        ptt_bot.log(
            'Checking mail index',
            i)
mail_info = ptt_bot.get_mail(i)
print(mail_info.title)
for _ in range(3):
newest_index = ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)
        print(f'Newest mailbox index {newest_index}')
mail_info = ptt_bot.get_mail(newest_index)
if mail_info is not None:
print(mail_info.author)
mail_index = ptt_bot.get_newest_index(
PTT.data_type.index_type.MAIL,
search_type=PTT.data_type.mail_search_type.KEYWORD,
search_condition='uPtt system')
    ptt_bot.log(
        'Newest mail index',
        mail_index)
    for i in reversed(range(1, mail_index + 1)):
        ptt_bot.log(
            'Checking mail index',
            i)
mail_info = ptt_bot.get_mail(
i,
search_type=PTT.data_type.mail_search_type.KEYWORD,
search_condition='uPtt system')
print(mail_info.title)
search_list = [
(PTT.data_type.mail_search_type.KEYWORD, 'uPtt'),
(PTT.data_type.mail_search_type.KEYWORD, 'key')
]
mail_index = ptt_bot.get_newest_index(
PTT.data_type.index_type.MAIL,
search_list=search_list)
for i in reversed(range(1, mail_index + 1)):
ptt_bot.log(
            'Checking mail index',
i)
mail_info = ptt_bot.get_mail(
i,
search_list=search_list)
print(mail_info.title)
def mail_receiver():
while True:
# ptt_bot.config.log_level = PTT.log.level.TRACE
newest_index = ptt_bot.get_newest_index(PTT.data_type.index_type.MAIL)
# ptt_bot.config.log_level = PTT.log.level.INFO
        ptt_bot.log(f'Newest mailbox index {newest_index}')
#
# user = ptt_bot.get_user(ptt_id)
        # ptt_bot.log(f'Mailbox status: {user.mail_status}')
for index in range(1, newest_index + 1):
mail_info = ptt_bot.get_mail(newest_index)
print(mail_info.author)
print(mail_info.content)
ptt_bot.del_mail(index)
        print('Done, taking a break')
time.sleep(3)
def change_pw():
ptt_bot.change_pw(password)
if __name__ == '__main__':
print('Welcome to PyPtt v ' + PTT.version.V + ' test case')
try:
# init()
# threading_test()
ptt_bot = PTT.API(
# log_level=PTT.log.level.TRACE,
# log_level=PTT.log.level.DEBUG,
# host=PTT.data_type.host_type.PTT2
            # for local testing
# connect_mode=PTT.connect_core.connect_mode.TELNET,
# host=PTT.data_type.host_type.LOCALHOST,
# port=8888,
            # for custom URL testing
# connect_mode=PTT.connect_core.connect_mode.TELNET,
# host='localhost',
# port=8888,
# language=PTT.i18n.language.ENGLISH
)
if ptt_bot.config.host == PTT.data_type.host_type.PTT1:
ptt_id, password = get_password('account_pt<PASSWORD>')
else:
ptt_id, password = get_password('account_ptt<PASSWORD>')
try:
ptt_bot.login(
ptt_id,
password,
# kick_other_login=True
)
except PTT.exceptions.LoginError:
            ptt_bot.log('Login failed')
            sys.exit()
        except PTT.exceptions.WrongIDorPassword:
            ptt_bot.log('Wrong ID or password')
            sys.exit()
        except PTT.exceptions.LoginTooOften:
            ptt_bot.log('Please wait a while before logging in again')
            sys.exit()
        if ptt_bot.unregistered_user:
            print('Unregistered user')
            if ptt_bot.process_picks != 0:
                print(f'Registration form queue position: {ptt_bot.process_picks}')
        if ptt_bot.registered_user:
            print('Registered user')
###################################
###################################
# performance_test()
# get_post()
# get_post_with_condition()
# post()
# get_newest_index()
# crawl_board()
# crawl_board_with_condition()
# push()
# get_user()
# throw_waterball()
# get_waterball()
# call_status()
# give_money()
# mail()
# has_new_mail()
# get_board_list()
# get_board_info()
# reply_post()
# get_favourite_board()
# search_user()
# get_mail()
        # mail_receiver()
# change_pw()
# get_aid_from_url()
# get_bottom_post_list()
# del_post()
# bucket()
# set_board_title()
# mark_post()
except Exception as e:
print(type(e))
traceback.print_tb(e.__traceback__)
print(e)
except KeyboardInterrupt:
pass
ptt_bot.logout()
|
finetuning/train_val.py | shubhaankargupta/FractalDB-Pretrained-ResNet-PyTorch | 126 | 12736816 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 05 23:55:12 2018
@author: <NAME>, <NAME>
"""
import sys
import numpy as np
import torch
import torch.nn as nn
# Training
def train(args, model, device, train_loader, optimizer, epoch, iteration):
model.train()
criterion = nn.CrossEntropyLoss(size_average=True) # previous PyTorch ver.
#criterion = nn.CrossEntropyLoss(reduction='sum')
for i_batch, sample_batched in enumerate(train_loader):
data, target = sample_batched["image"].to(device), sample_batched["label"].to(device)
optimizer.zero_grad()
output = model(data)
pred = output.max(1, keepdim=True)[1]
correct = pred.eq(target.view_as(pred)).sum().item()
loss = criterion(output, target)
loss.backward()
optimizer.step()
if i_batch % args.log_interval == 0:
            sys.stdout.write("\repoch:{0:>3} iteration:{1:>6} train_loss: {2:.6f} train_accuracy: {3:5.2f}%".format(
epoch, iteration, loss.item(), 100.*correct/float(len(sample_batched["label"]))))
sys.stdout.flush()
iteration += 1
# Validation
def val(args, model, device, test_loader, iteration):
model.eval()
criterion = nn.CrossEntropyLoss(size_average=False) # previous PyTorch ver.
#criterion = nn.CrossEntropyLoss(reduction='sum')
test_loss = 0
correct = 0
with torch.no_grad():
for i_batch, sample_batched in enumerate(test_loader):
data, target = sample_batched["image"].to(device), sample_batched["label"].to(device)
output = model(data)
test_loss += criterion(output, target).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= float(len(test_loader.dataset))
correct /= float(len(test_loader.dataset))
print("\nValidation: Accuracy: {0:.2f}% test_loss: {1:.6f}".format(100. * correct, test_loss))
return test_loss, 100. * correct
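# Minimal usage sketch (assumes a toy dataset and model; not part of the training
# code above): shows the {"image": ..., "label": ...} batch format the loaders are
# expected to yield and how train()/val() are typically driven.
if __name__ == "__main__":
    from argparse import Namespace
    from torch.utils.data import DataLoader, Dataset
    class _ToyDataset(Dataset):
        # 32 random RGB 32x32 images spread over 4 fake classes
        def __len__(self):
            return 32
        def __getitem__(self, idx):
            return {"image": torch.randn(3, 32, 32), "label": idx % 4}
    args = Namespace(log_interval=1)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 4)).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loader = DataLoader(_ToyDataset(), batch_size=8)
    iteration = 0
    for epoch in range(1, 3):
        train(args, model, device, loader, optimizer, epoch, iteration)
        val(args, model, device, loader, iteration)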
|
src/pretix/base/exporters/mail.py | fabm3n/pretix | 1,248 | 12736906 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from collections import OrderedDict
from django import forms
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
from pretix.base.models import OrderPosition
from ..exporter import BaseExporter
from ..models import Order
from ..signals import (
register_data_exporters, register_multievent_data_exporters,
)
class MailExporter(BaseExporter):
identifier = 'mailaddrs'
verbose_name = _('Email addresses (text file)')
def render(self, form_data: dict):
qs = Order.objects.filter(event__in=self.events, status__in=form_data['status']).prefetch_related('event')
addrs = qs.values('email')
pos = OrderPosition.objects.filter(
order__event__in=self.events, order__status__in=form_data['status']
).values('attendee_email')
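        # Union the buyer email addresses with the attendee email addresses,
        # de-duplicating via set union, before joining them into one
        # CRLF-separated text blob.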
data = "\r\n".join(set(a['email'] for a in addrs if a['email'])
| set(a['attendee_email'] for a in pos if a['attendee_email']))
if self.is_multievent:
return '{}_pretixemails.txt'.format(self.events.first().organizer.slug), 'text/plain', data.encode("utf-8")
else:
return '{}_pretixemails.txt'.format(self.event.slug), 'text/plain', data.encode("utf-8")
@property
def export_form_fields(self):
return OrderedDict(
[
('status',
forms.MultipleChoiceField(
label=_('Filter by status'),
initial=[Order.STATUS_PENDING, Order.STATUS_PAID],
choices=Order.STATUS_CHOICE,
widget=forms.CheckboxSelectMultiple,
required=True
)),
]
)
@receiver(register_data_exporters, dispatch_uid="exporter_mail")
def register_mail_export(sender, **kwargs):
return MailExporter
@receiver(register_multievent_data_exporters, dispatch_uid="multiexporter_mail")
def register_multievent_mail_export(sender, **kwargs):
return MailExporter
|
tests/test_example_001.py | benediktkr/python-terrascript | 507 | 12736942 | import terrascript
import terrascript.provider
import terrascript.resource
import tests.shared
def test_example_001():
config = terrascript.Terrascript()
config += terrascript.provider.aws(region="us-east-1", version="~> 2.0")
config += terrascript.resource.aws_vpc("example", cidr_block="10.0.0.0/16")
tests.shared.assert_deep_equal(config, "test_001.tf.json")
|
torch_dreams/transforms.py | Tiamat-Tech/torch-dreams | 214 | 12736973 | import torchvision.transforms as transforms
import torch.nn as nn
import random
from .image_transforms import resize_4d_tensor_by_factor, resize_4d_tensor_by_size
imagenet_transform = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std= [0.229, 0.224, 0.225]
)
class random_resize(nn.Module):
def __init__(self, max_size_factor, min_size_factor):
super().__init__()
self.max_size_factor = max_size_factor
self.min_size_factor = min_size_factor
def forward(self, x):
# size = random.randint(a = 300, b = 600)
# resized= resize_4d_tensor_by_size(x = x, height = size, width = size)
height_factor = random.uniform(a = self.min_size_factor , b = self.max_size_factor)
width_factor = random.uniform(a = self.min_size_factor , b = self.max_size_factor)
resized = resize_4d_tensor_by_factor(x = x, height_factor = height_factor, width_factor = width_factor)
return resized
class pair_random_resize(nn.Module):
def __init__(self, max_size_factor, min_size_factor):
super().__init__()
self.max_size_factor = max_size_factor
self.min_size_factor = min_size_factor
def forward(self, tensors = []):
height_factor = random.uniform(a = self.min_size_factor , b = self.max_size_factor)
width_factor = random.uniform(a = self.min_size_factor , b = self.max_size_factor)
outputs = []
for x in tensors:
resized_tensor = resize_4d_tensor_by_factor(x = x, height_factor = height_factor, width_factor = width_factor)
outputs.append(resized_tensor)
return outputs
class pair_random_affine(nn.Module):
def __init__(self, degrees, translate_x, translate_y):
super().__init__()
self.degrees = degrees
self.translate_x = translate_x
self.translate_y = translate_y
self.affine = transforms.RandomAffine(degrees = self.degrees, translate= (self.translate_x, self.translate_y))
def forward(self, tensors = []):
params = self.affine.get_params(degrees = (-self.degrees, self.degrees), translate= (self.translate_x, self.translate_y), scale_ranges = (1,1), shears = (0,0), img_size = (tensors[0].shape[-2], tensors[0].shape[1]))
outputs = []
for x in tensors:
affined = transforms.functional.affine(x, *params)
outputs.append(affined)
return outputs
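# Minimal usage sketch (assumes 4-D NCHW tensors and that the relative
# image_transforms helpers resize as described above; not exercised by the
# library itself):
if __name__ == "__main__":
    import torch
    x = torch.randn(1, 3, 224, 224)
    y = torch.randn(1, 3, 224, 224)
    resize = random_resize(max_size_factor=1.2, min_size_factor=0.8)
    print(resize(x).shape)  # height/width rescaled by independent random factors
    pair_resize = pair_random_resize(max_size_factor=1.2, min_size_factor=0.8)
    a, b = pair_resize([x, y])
    print(a.shape == b.shape)  # True: both tensors share the same random factors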
|
webservices/common/models/elections.py | 18F/openFEC | 246 | 12736986 | from .base import db
from webservices import docs
class ElectionResult(db.Model):
__tablename__ = 'ofec_election_result_mv'
election_yr = db.Column(db.Integer, primary_key=True, doc=docs.ELECTION_YEAR)
cand_office = db.Column(db.String, primary_key=True, doc=docs.OFFICE)
cand_office_st = db.Column(db.String, primary_key=True, doc=docs.STATE_GENERIC)
cand_office_district = db.Column(db.String, primary_key=True, doc=docs.DISTRICT)
election_type = db.Column(db.String)
fec_election_yr = db.Column(db.Integer)
cand_id = db.Column(db.String, doc=docs.CANDIDATE_ID)
cand_name = db.Column(db.String, doc=docs.CANDIDATE_NAME)
class ElectionsList(db.Model):
__tablename__ = 'ofec_elections_list_mv'
idx = db.Column(db.Integer, primary_key=True)
sort_order = db.Column(db.Integer)
office = db.Column(db.String, doc=docs.OFFICE)
state = db.Column(db.String, doc=docs.STATE_GENERIC)
district = db.Column(db.String, doc=docs.DISTRICT)
cycle = db.Column(db.Integer)
incumbent_id = db.Column(db.String, doc=docs.CANDIDATE_ID)
incumbent_name = db.Column(db.String, doc=docs.CANDIDATE_NAME)
class ZipsDistricts(db.Model):
__table_args__ = {'schema': 'staging'}
__tablename__ = 'ref_zip_to_district'
zip_district_id = db.Column(db.Integer, primary_key=True)
district = db.Column(db.String, doc=docs.DISTRICT)
zip_code = db.Column(db.String)
state_abbrevation = db.Column(db.String)
active = db.Column(db.String)
|
test/test_ops.py | pytorch/functorch | 423 | 12737024 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from torch.testing._internal.common_utils import TestCase, run_tests, is_iterable_of_tensors
import torch
from torch import Tensor
import functools
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_device_type import ops
from torch.testing._internal.common_device_type import \
toleranceOverride, tol
from functorch_lagging_op_db import functorch_lagging_op_db
from functorch_additional_op_db import additional_op_db
from common_utils import (
get_fallback_and_vmap_exhaustive,
get_exhaustive_batched_inputs,
xfail,
skip,
skipOps,
tol1,
# tol2,
opsToleranceOverride,
check_vmap_fallback,
)
import unittest
from torch.utils._pytree import tree_flatten, tree_unflatten, tree_map
from functorch import grad, vjp, vmap, jacrev, jacfwd
import torch.autograd.forward_ad as fwAD
from functorch._src.eager_transforms import _as_tuple, jvp
aten = torch.ops.aten
# Version of autograd.grad that handles outputs that don't depend on inputs
def _autograd_grad(outputs, inputs, grad_outputs=None, retain_graph=False, create_graph=True):
inputs, inputs_spec = tree_flatten(inputs)
result = [torch.zeros_like(inp) for inp in inputs]
diff_argnums = tuple(i for i, inp in enumerate(inputs) if inp.requires_grad)
inputs = tuple(inputs[i] for i in diff_argnums)
if grad_outputs is None:
diff_outputs = tuple(out for out in outputs if out.requires_grad)
else:
something = [(out, go) for out, go in zip(outputs, grad_outputs)
if out.requires_grad]
if len(something) == 0:
diff_outputs, grad_outputs = (), ()
else:
diff_outputs, grad_outputs = zip(*something)
if len(diff_outputs) == 0:
return tuple(torch.zeros_like(inp) for inp in inputs)
grad_inputs = torch.autograd.grad(diff_outputs, inputs, grad_outputs,
retain_graph=retain_graph,
create_graph=create_graph,
allow_unused=True)
grad_inputs = tuple(torch.zeros_like(inp) if gi is None else gi
for gi, inp in zip(grad_inputs, inputs))
for idx, grad_inp in zip(diff_argnums, grad_inputs):
result[idx] = grad_inp
return tree_unflatten(result, inputs_spec)
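# Minimal sketch (example-only helper, not used by the tests below): when an output
# does not depend on one of the inputs, _autograd_grad fills that gradient slot with
# zeros instead of the None that raw torch.autograd.grad(..., allow_unused=True) returns.
def _example_autograd_grad_zero_fill():
    x = torch.randn(3, requires_grad=True)
    y = torch.randn(3, requires_grad=True)
    out = (x * 2).sum()  # depends on x only
    gx, gy = _autograd_grad((out,), (x, y))
    return gx, gy  # gx == 2 * ones(3), gy == zeros(3)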
def diff_arg(arg, requires_grad=True):
def is_differentiable_arg(arg):
if requires_grad:
return arg.requires_grad
else:
return arg.is_floating_point() or arg.is_complex()
if is_iterable_of_tensors(arg):
if all([is_differentiable_arg(a) for a in arg]):
return True
if all([not is_differentiable_arg(a) for a in arg]):
return False
raise RuntimeError("NYI: The test runner can't handle this")
return isinstance(arg, Tensor) and is_differentiable_arg(arg)
# Given f, returns an f' such that:
# - f' takes only positional arguments
# - All arguments to f' are floating-point Tensors
# - All outputs of f' are floating-point Tensors
def normalize_op_input_output2(f, args, kwargs, output_process_fn_grad=None, requires_grad=True):
flat_args, args_spec = tree_flatten(args)
diff_argnums = tuple(i for i, arg in enumerate(flat_args) if diff_arg(arg, requires_grad=requires_grad))
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@functools.wraps(f)
def wrapped(*primals):
_args = list(flat_args)
for num, arg in zip(diff_argnums, primals):
_args[num] = arg
_args = tree_unflatten(_args, args_spec)
result = f(*_args, **kwargs)
if output_process_fn_grad is not None:
result = output_process_fn_grad(result)
if isinstance(result, tuple):
# TODO: Remove the following hack for namedtuples
result = tuple(result)
result = tuple(r for r in result if torch.is_floating_point(r))
assert len(result) > 0
return result
return wrapped, primals
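# Minimal sketch (example-only helper, not used by the tests below): only the
# requires_grad tensor is exposed as a primal; the non-differentiable argument
# stays baked into the returned wrapper.
def _example_normalize_op_input_output2():
    a = torch.randn(3, requires_grad=True)
    b = torch.randn(3)  # not differentiable
    fn, primals = normalize_op_input_output2(torch.add, (a, b), {})
    assert len(primals) == 1 and primals[0] is a
    return fn(*primals)  # same value as torch.add(a, b)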
# TODO: consolidate with normalize_op_input_output2
def normalize_op_input_output3(f, args, kwargs, sample_args, output_process_fn_grad=None):
flat_args, args_spec = tree_flatten(args)
flat_sample_args, _ = tree_flatten(sample_args)
diff_argnums = tuple(i for i, (arg, sample) in enumerate(zip(flat_args, flat_sample_args))
if diff_arg(sample, requires_grad=True))
assert len(diff_argnums) > 0
primals = tuple(flat_args[i] for i in diff_argnums)
@functools.wraps(f)
def wrapped(*primals):
_args = list(flat_args)
for num, arg in zip(diff_argnums, primals):
_args[num] = arg
_args = tree_unflatten(_args, args_spec)
result = f(*_args, **kwargs)
if output_process_fn_grad is not None:
result = output_process_fn_grad(result)
if isinstance(result, tuple):
# TODO: Remove the following hack for namedtuples
result = tuple(result)
result = tuple(r for r in result if torch.is_floating_point(r))
assert len(result) > 0
return result
return wrapped, primals
def normalize_op_input_output(f, sample, requires_grad=True):
args = tuple([sample.input] + list(sample.args))
return normalize_op_input_output2(
f, args, sample.kwargs, sample.output_process_fn_grad, requires_grad=requires_grad
)
def ref_vjp(f, *primals):
result = f(*primals)
def wrapped(cotangents):
return _autograd_grad(_as_tuple(result), primals, _as_tuple(cotangents))
return result, wrapped
def simulate_jvp(f, primals, tangents):
primals_out, tangents_out = torch.autograd.functional.jvp(f, primals, tangents)
return primals_out, tangents_out
def ref_jvp(f, primals, tangents):
with fwAD.dual_level():
duals = tuple(fwAD.make_dual(p, t) for p, t in zip(primals, tangents))
result_duals = f(*duals)
result_duals, spec = tree_flatten(result_duals)
primals_out, tangents_out = zip(*(fwAD.unpack_dual(d) for d in result_duals))
return tree_unflatten(primals_out, spec), tree_unflatten(tangents_out, spec)
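# Minimal sketch (example-only helper, not used by the tests below): ref_jvp pushes
# tangents through autograd's forward mode, e.g. for sin the tangent out is cos(x) * t.
def _example_ref_jvp():
    x, t = torch.randn(3), torch.randn(3)
    primal_out, tangent_out = ref_jvp(torch.sin, (x,), (t,))
    return primal_out, tangent_out  # tangent_out ~= torch.cos(x) * t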
def get_sample_cotangents(f, sample):
fn, primals = normalize_op_input_output(f, sample)
output = fn(*primals)
return tree_map(torch.randn_like, output)
# returns a new function g(*args, *cotangents) that computes vjps,
# together with the flattened (*args, *cotangents) it should be called with
def get_vjp_fn_and_args_with_cotangents(f, sample, cotangents):
args = tuple([sample.input] + list(sample.args))
kwargs = sample.kwargs
flat_args, args_spec = tree_flatten(args)
flat_cotangents, cotangents_spec = tree_flatten(cotangents)
@functools.wraps(f)
def wrapped(*args):
assert len(args) == len(flat_args) + len(flat_cotangents)
actual_args = args[:len(flat_args)]
cotangents = args[len(flat_args):]
actual_args = tree_unflatten(actual_args, args_spec)
cotangents = tree_unflatten(cotangents, cotangents_spec)
fn, primals = normalize_op_input_output3(f, actual_args, kwargs,
flat_args,
sample.output_process_fn_grad)
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
return wrapped, tuple(flat_args + flat_cotangents)
# returns a new function g(*args, *cotangents) that computes vjps via
# torch.autograd.grad, together with the flattened (*args, *cotangents)
def get_autograd_fn_and_args_with_cotangents(f, sample, cotangents):
args = tuple([sample.input] + list(sample.args))
kwargs = sample.kwargs
flat_args, args_spec = tree_flatten(args)
flat_cotangents, cotangents_spec = tree_flatten(cotangents)
@functools.wraps(f)
def wrapped(*args):
assert len(args) == len(flat_args) + len(flat_cotangents)
actual_args = args[:len(flat_args)]
cotangents = args[len(flat_args):]
actual_args = tree_unflatten(actual_args, args_spec)
cotangents = tree_unflatten(cotangents, cotangents_spec)
fn, primals = normalize_op_input_output3(f, actual_args, kwargs,
flat_args,
sample.output_process_fn_grad)
out = fn(*primals)
diff_wrt = tuple(primal for primal in primals if (primal.requires_grad or primal.grad_fn is not None))
if diff_wrt:
return torch.autograd.grad(out, diff_wrt, grad_outputs=cotangents)
else:
return (torch.ones(()),) # uuugh hack...this will need to be more generic
return wrapped, tuple(flat_args + flat_cotangents)
# Returns a new function g(*args, *cotangents) that computes vjps and
# sample (*args, *cotangents)
def get_vjpfull_variant(f, sample):
fn, primals = normalize_op_input_output(f, sample)
result = fn(*primals)
cotangents = _as_tuple(
tree_map(lambda x: torch.randn_like(x, requires_grad=True), result))
num_primals = len(primals)
args = (*primals, *cotangents)
@functools.wraps(f)
def wrapped(*args):
primals = args[:num_primals]
cotangents = args[num_primals:]
result, vjp_fn = vjp(fn, *primals)
if isinstance(result, torch.Tensor):
assert len(cotangents) == 1
cotangents = cotangents[0]
return vjp_fn(cotangents)
return wrapped, args
def get_jvp_variant(f, sample):
# We want this higher-order variant of jvp, so that it can
# be used to wrap vmap
fn, primals = normalize_op_input_output(f, sample, requires_grad=False)
tangents = _as_tuple(
tree_map(lambda x: torch.randn_like(x), primals))
@functools.wraps(f)
def wrapped(*args):
tangents = args
primals_out, tangents_out = jvp(fn, primals, tangents)
if isinstance(primals_out, torch.Tensor):
return (primals_out, tangents_out)
else:
flat_primals_out, _ = tree_flatten(primals_out)
flat_tangents_out, _ = tree_flatten(tangents_out)
return tuple(flat_primals_out + flat_tangents_out)
return wrapped, tangents
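# Note: get_jvp_variant closes over fixed primals and exposes only the tangents, so
# vmapping the returned wrapper batches just the tangents (i.e. batched grads);
# get_jvp_variant_primals_tangents below exposes both primals and tangents instead.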
def get_jvp_variant_primals_tangents(f, sample):
# We want this higher-order variant of jvp, so that it can
# be used to wrap vmap
fn, primals = normalize_op_input_output(f, sample, requires_grad=False)
tangents = _as_tuple(
tree_map(lambda x: torch.randn_like(x), primals))
@functools.wraps(f)
def wrapped(*args):
primals_in = args[:len(primals)]
tangents_in = args[len(primals):]
primals_out, tangents_out = jvp(fn, primals_in, tangents_in)
if isinstance(primals_out, torch.Tensor):
return (primals_out, tangents_out)
else:
flat_primals_out, _ = tree_flatten(primals_out)
flat_tangents_out, _ = tree_flatten(tangents_out)
return tuple(flat_primals_out + flat_tangents_out)
return wrapped, primals + tangents
def is_inplace(op, variant):
if hasattr(variant, "__wrapped__"):
return variant.__wrapped__ is op.get_inplace()
return variant is op.get_inplace()
vjp_fail = {
skip('nn.functional.dropout'), # randomness testing artifact
skip('nn.functional.rrelu'), # randomness testing artifact
skip('bernoulli'), # randomness testing artifact
skip('normal', ''), # randomness testing artifact
skip('normal', 'number_mean'), # randomness testing artifact
xfail('tensor_split'),
xfail('to_sparse'),
xfail('nn.functional.ctc_loss'),
skip('nn.functional.feature_alpha_dropout', 'with_train'), # fails on cuda, runs okay on cpu
skip('nn.functional.feature_alpha_dropout', 'without_train'), # fails on cuda, runs okay on cpu
skip('pca_lowrank', ''), # fails on cuda, runs okay on cpu
skip('svd_lowrank', ''), # fails on cuda, runs okay on cpu
skip('nn.functional.dropout2d', ''), # fails on cuda, runs okay on cpu
}
class TestOperators(TestCase):
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_grad', vjp_fail.union({
skip('nn.functional.fractional_max_pool2d'), # fails on cuda, runs okay on cpu
skip('nn.functional.fractional_max_pool3d'), # fails on cuda, runs okay on cpu
}))
@opsToleranceOverride('TestOperators', 'test_grad', (
tol1('nn.functional.binary_cross_entropy_with_logits',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}),
))
def test_grad(self, device, dtype, op):
if op.name in vjp_fail:
self.skipTest("Skipped; Expected failures")
return
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
diff_argnums = tuple(i for i, arg in enumerate(args) if diff_arg(arg))
assert len(diff_argnums) > 0
diff_args = tuple(args[i] for i in diff_argnums)
def wrapped_fn(*args, **kwargs):
result = op(*args, **kwargs)
if sample.output_process_fn_grad is not None:
result = sample.output_process_fn_grad(result)
# Reduce into single value for grad
if isinstance(result, torch.Tensor):
return result.sum()
result = sum([res.sum() for res in result])
return result
result = grad(wrapped_fn, diff_argnums)(*args, **kwargs)
expected = _autograd_grad(_as_tuple(wrapped_fn(*args, **kwargs)), diff_args)
self.assertEqual(result, expected)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_jvp', set({
skip('nn.functional.dropout'), # randomness testing artifact; not actually a problem
skip('nn.functional.rrelu'), # randomness testing artifact; not actually a problem
skip('nn.functional.fractional_max_pool2d'), # fails on cuda, runs okay on cpu
skip('nn.functional.fractional_max_pool3d'), # fails on cuda, runs okay on cpu
skip('nn.functional.max_pool1d'), # fails on cpu, runs okay on cuda
skip('nn.functional.feature_alpha_dropout', 'with_train'), # fails on cuda, runs okay on cpu
skip('nn.functional.feature_alpha_dropout', 'without_train'), # fails on cuda, runs okay on cpu
skip('pca_lowrank', ''), # fails on cuda, runs okay on cpu
skip('svd_lowrank', ''), # fails on cuda, runs okay on cpu
skip('nn.functional.dropout2d', ''), # fails on cuda, runs okay on cpu
# The following don't have a forward-mode AD formula in PyTorch core
# (check derivatives.yaml).
xfail('var_mean'),
xfail('std_mean'),
# =============================================
# NB: The above failures also fail using PyTorch core's
# forward-mode AD and vmap.
# The failures below are functorch-specific issues
# =============================================
# Composite ops that do bad things. Need to be fixed in PyTorch core.
# RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
xfail('tensor_split'),
skip('bernoulli'), # cuda set seed randomness issues
# BUG: runs and produces numerical differences
skip('nn.functional.max_unpool1d'), # fails everywhere except on mac
skip('nn.functional.max_unpool2d'), # fails everywhere except on windows
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
}))
@opsToleranceOverride('TestOperators', 'test_jvp', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=1e-04, rtol=1.3e-06)}, device_type='cuda'),
tol1('nn.functional.binary_cross_entropy_with_logits',
{torch.float32: tol(atol=4e-04, rtol=4e-04)}),
))
def test_jvp(self, device, dtype, op):
# TODO: when we change supports_autograd to supports_backward_ad, also change in this file
VJP_DECOMP = {
'nn.functional.logsigmoid',
}
if op.name in VJP_DECOMP:
ref_jvp_local = simulate_jvp
else:
ref_jvp_local = ref_jvp
if not op.supports_forward_ad and op.name not in VJP_DECOMP:
self.skipTest("Skipped! Forward AD not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
# NB: we used requires_grad=True to determine where the primals are,
# but don't need that information otherwise
fn, primals = normalize_op_input_output(op, sample, requires_grad=True)
primals = tree_map(lambda x: x.detach(), primals)
tangents = tree_map(lambda x: torch.randn_like(x), primals)
primal_outs, tangent_outs = jvp(fn, primals, tangents)
expected_primal_outs, expected_tangent_outs = ref_jvp_local(fn, primals, tangents)
self.assertEqual(primal_outs, expected_primal_outs)
self.assertEqual(tangent_outs, expected_tangent_outs)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vjp', vjp_fail.union({
skip('nn.functional.fractional_max_pool2d'), # fails on cpu, runs okay on cuda
skip('nn.functional.fractional_max_pool3d'), # fails on cpu, runs okay on cuda
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('pca_lowrank', ''),
xfail('nn.functional.dropout2d', ''),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('svd_lowrank', ''),
}))
@opsToleranceOverride('TestOperators', 'test_vjp', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=5e-05, rtol=9e-05)}, device_type='cuda'),
tol1('nn.functional.binary_cross_entropy_with_logits',
{torch.float32: tol(atol=1e-04, rtol=1e-04)}),
))
def test_vjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
def _test(_op):
for sample in samples:
fn, primals = normalize_op_input_output(_op, sample)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
out, vjp_fn = vjp(fn, *primals)
self.assertEqual(out, result)
result_vjps = vjp_fn(cotangents)
_, vjp_fn = ref_vjp(fn, *primals)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
_test(op)
for a_op in op.aliases:
_test(a_op)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vjpvjp', vjp_fail.union({
skip('nn.functional.max_unpool1d'), # Flaky
skip('nn.functional.max_unpool2d'), # Flaky
skip('nn.functional.fractional_max_pool2d'), # randomness
skip('nn.functional.fractional_max_pool3d'), # randomness
}))
@opsToleranceOverride('TestOperators', 'test_vjpvjp', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=5e-05, rtol=9e-05)}, device_type='cuda'),
))
def test_vjpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
if not op.supports_gradgrad:
self.skipTest("Skipped! Operation does not support gradgrad")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
fn, args = get_vjpfull_variant(op, sample)
result = fn(*args)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
# Compute vjp of vjp
_, vjp_fn = vjp(fn, *args)
result_vjps = vjp_fn(cotangents)
# Compute ref_vjp of vjp. We could have done ref_vjp of ref_vjp,
# but since we're confident that vjp works by itself, this is
# an equivalent way to test that.
_, vjp_fn = ref_vjp(fn, *args)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
def test_vmapvjpvjp(self, device, dtype, op):
self.skipTest("Skipped; these tests take too long")
op_skip = set({
})
op_skip = op_skip.union(vjp_fail)
if op.name in op_skip:
self.skipTest("Skipped; Expected failures")
return
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
if not op.supports_gradgrad:
self.skipTest("Skipped! Operation does not support gradgrad")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
fn, args = get_vjpfull_variant(op, sample)
result = fn(*args)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
cotangents, _ = tree_flatten(cotangents)
num_args = len(args)
args_and_cotangents = tuple(args) + tuple(cotangents)
def vjp_of_vjp(*args_and_cotangents):
args = args_and_cotangents[:num_args]
cotangents = args_and_cotangents[num_args:]
result, vjp_fn = vjp(fn, *args)
result_vjps = vjp_fn(cotangents)
result, _ = tree_flatten(result)
result_vjps, _ = tree_flatten(result_vjps)
return (*result, *result_vjps)
generator = get_fallback_and_vmap_exhaustive(vjp_of_vjp, args_and_cotangents, {}, opinfo=op)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
vmapvjp_fail = vjp_fail.union({
# The following are not bugs and are expected behavior
xfail('masked_select'), # Not possible due to dynamic shapes
skip('bernoulli'), # randomness
skip('normal', ''), # randomness
skip('normal', 'number_mean'), # randomness
xfail('nn.functional.dropout'), # randomness
xfail('as_strided'), # as_strided is too wild for us to support, wontfix
xfail('index_put', ''), # not possible due to dynamic shapes; we support a subset
xfail('masked_scatter'), # dynamic
xfail('nn.functional.fractional_max_pool2d'), # random
xfail('nn.functional.fractional_max_pool3d'), # random
xfail('take'), # dynamic
# All of the following are bugs and need to be fixed
    skip('linalg.svdvals'), # really annoying thing where it passes correctness check but not has_batch_rule
xfail('__getitem__', ''), # dynamic error
xfail('_masked.prod'), # calls aten::item
xfail('eig'), # calls aten::item
xfail('linalg.det', ''), # calls .item()
xfail('linalg.eig'), # Uses aten::allclose
xfail('linalg.eigh'), # needs diag_scatter
xfail('linalg.householder_product'), # needs select_scatter
xfail('linalg.slogdet'), # calls .item()
xfail('logdet'), # calls .item()
xfail('matrix_exp'), # would benefit from narrow_scatter
xfail('nanquantile'), # checks q via a .item() call
xfail('nn.functional.gaussian_nll_loss'), # checks var for if any value < 0
xfail('prod'), # calls nonzero
xfail('put'),
xfail('quantile'), # checks q via a .item() call
xfail('stft'),
xfail('symeig'), # would benefit from diag_scatter
xfail('view_as_complex'),
# required rank 4 tensor to use channels_last format
xfail('bfloat16'),
xfail('double'),
xfail('float'),
xfail('half'),
xfail('scatter_reduce', 'prod'), # item call
# NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format
xfail('nn.functional.max_unpool2d'),
xfail('nn.functional.max_unpool2d', 'grad'),
})
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@opsToleranceOverride('TestOperators', 'test_vmapvjp', (
tol1('linalg.svd',
{torch.float32: tol(atol=1.5e-04, rtol=1e-04)}, device_type="cuda"),
tol1('svd',
{torch.float32: tol(atol=1.5e-04, rtol=1e-04)}, device_type="cuda"),
))
@skipOps('TestOperators', 'test_vmapvjp', vmapvjp_fail)
def test_vmapvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
cotangents = get_sample_cotangents(op, sample)
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(fn, args, {}, opinfo=op):
self.assertEqual(loop_out, batched_out)
# There are several variations we care about
# 1) primal batched (TODO)
# 2) tangent batched (batched grads) <--
# 3) both batched (TODO)
# The below tests (2) only.
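    # In code, variation (2) roughly corresponds to
    #     vmap(lambda t: jvp(op, primals, (t,)))(batched_tangents)
    # which is what get_jvp_variant plus get_fallback_and_vmap_exhaustive exercise
    # below (illustrative comment only; names refer to the helpers defined above).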
@ops(functorch_lagging_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestOperators', 'test_vmapjvp', {
skip('nn.functional.dropout'), # randomness
skip('nn.functional.rrelu'), # randomness
skip('nn.functional.fractional_max_pool2d'), # randomness
skip('nn.functional.fractional_max_pool3d'), # randomness
skip('bernoulli', ''), # randomness
skip('nn.functional.max_pool1d'), # fails on cpu, runs on cuda
        # TODO: fails in core due to in-place batched into non-batched
# but fails here for a different reason
xfail('linalg.householder_product'),
# Try to in-place batched tensor into non-batched tensor
xfail('matrix_exp'),
        # Apparently these support forward AD, but we get "Trying to use forward AD..."
# These are cases where OpInfo has supports_forward_ad=True, but disables
# the test
xfail('var_mean'),
xfail('std_mean'),
# RuntimeError: expand: the number of sizes provided (1) must be greater or
# equal to the number of dimensions in the tensor (2)
xfail('nanquantile'),
xfail('quantile'),
# Not implemented
xfail('scatter'),
# =============================================
# NB: The above failures also fail in PyTorch core.
# The failures below only fail in functorch
# =============================================
# Composite ops that do bad things. Need to be fixed in PyTorch core.
# RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
xfail('tensor_split'),
# Causing multiple forward mode AD issues, needs investigation
xfail('nn.functional.batch_norm'),
xfail('nn.functional.batch_norm', 'without_cudnn', device_type='cuda'),
skip('nn.functional.feature_alpha_dropout', 'with_train'),
skip('pca_lowrank', ''),
skip('nn.functional.dropout2d', ''),
skip('nn.functional.feature_alpha_dropout', 'without_train'),
skip('svd_lowrank', ''),
xfail('nn.functional.soft_margin_loss', ''),
xfail('stft'), # something weird is happening with shapes
xfail('double'), # required rank 4 tensor to use channels_last format
# BUG: runs and produces numerical differences
skip('nn.functional.max_unpool1d', device_type='cpu'), # fails everywhere except on mac
skip('nn.functional.max_unpool2d'), # fails everywhere except on mac
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
xfail('put'), # calls put_ during vmap with only vmaps over other, not self
})
def test_vmapjvp(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple([*arg_values, *kwarg_values])
fn, args = get_jvp_variant(op, sample)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(fn, args, {}, opinfo=op, bdims=(0,)):
self.assertEqual(loop_out, batched_out)
vmapjvpall_fail = {
# The following are expected (not a bug)
skip('bernoulli', ''), # randomness
skip('nn.functional.dropout'), # randomness
skip('nn.functional.rrelu'), # randomness
skip('nn.functional.dropout2d', ''),
skip('nn.functional.feature_alpha_dropout', 'without_train'),
skip('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('nn.functional.fractional_max_pool2d'), # Cannot access data pointer of Tensor that doesn't have storage
xfail('nn.functional.fractional_max_pool3d'), # Cannot access data pointer of Tensor that doesn't have storage
# The following are bugs that we should fix
skip('nn.functional.max_pool1d'), # fails on cpu, runs on cuda
xfail('nn.functional.batch_norm', device_type='cuda'),
xfail('nn.functional.batch_norm', 'without_cudnn', device_type='cuda'),
xfail('_masked.mean'),
xfail('_masked.prod'),
# Causing issues with multiple cpu levels of forward mode AD
xfail('nn.functional.batch_norm', device_type='cpu'),
# https://github.com/pytorch/functorch/issues/857
skip('nn.functional.embedding', ''),
xfail('nn.functional.soft_margin_loss', ''),
xfail('nn.functional.binary_cross_entropy_with_logits', ''),
xfail('linalg.householder_product'),
xfail('tensor_split'),
xfail('quantile'),
xfail('var_mean'),
xfail('as_strided'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('std_mean'),
xfail('scatter'),
xfail('matrix_exp'),
xfail('nanquantile'),
xfail('view_as_complex'),
xfail('prod'),
skip('pca_lowrank', ''),
skip('svd_lowrank', ''),
xfail('stft'), # transpose_ fallback
xfail('double'), # required rank 4 tensor to use channels_last format
    skip('nn.functional.max_unpool1d'), # Flaky, seems to sometimes hit max_unpool2d
skip('nn.functional.max_unpool2d'), # fails everywhere except on mac
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
xfail('put'), # calls put_ during vmap with only vmaps over other, not self
xfail('nn.functional.prelu'), # Call Tensor.as_strided
}
@ops(functorch_lagging_op_db, allowed_dtypes=(torch.float,))
@opsToleranceOverride('TestOperators', 'test_vmapjvpall', (
tol1('nn.functional.conv_transpose3d',
{torch.float32: tol(atol=2e-04, rtol=9e-3)}, device_type='cuda'),
))
@skipOps('TestOperators', 'test_vmapjvpall', vmapjvpall_fail)
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
# This is technically a superset of test_vmapjvp. We should either delete test_vmapjvp
# or figure out if we can split vmapjvpall. It's useful to keep test_vmapjvp intact
    # because that corresponds to "batched forward-mode AD" testing in PyTorch core
def test_vmapjvpall(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple([*arg_values, *kwarg_values])
fn, args = get_jvp_variant_primals_tangents(op, sample)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(fn, args, {}, opinfo=op):
self.assertEqual(loop_out, batched_out)
@ops(functorch_lagging_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vmapjvpall_has_batch_rule', vmapjvpall_fail.union({
xfail('linalg.solve_triangular'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('lu'),
xfail('cumprod'),
xfail('lu_solve'),
xfail('linalg.lstsq', 'grad_oriented'),
xfail('linalg.cholesky'),
xfail('linalg.qr'),
xfail('cross'),
xfail('qr'),
xfail('linalg.pinv'),
xfail('masked_fill'),
xfail('copysign'),
xfail('linalg.solve'),
xfail('linalg.eig'),
xfail('complex'),
xfail('linalg.pinv', 'hermitian'),
xfail('pinverse'),
skip('_masked.mean'), # ???
xfail('linalg.cholesky_ex'),
xfail('masked_scatter'),
xfail('index_fill'),
xfail('take'),
xfail('linalg.eigvals'),
xfail('linalg.qr'),
xfail('linalg.tensorsolve'),
xfail('nn.functional.max_pool3d'),
xfail('vdot'),
xfail('linalg.cross'),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('linalg.lu_factor', ''),
xfail('nn.functional.dropout2d', ''),
xfail('nn.functional.kl_div', ''),
xfail('pca_lowrank', ''),
xfail('svd_lowrank', ''),
xfail('linalg.lu_factor_ex', ''),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('special.log_ndtr', ''),
xfail('fft.ihfft2'), # conj_physical fallback
xfail('fft.ihfftn'), # conj_physical fallback
xfail('istft'), # col2im fallback
xfail('polar'), # complex fallback
xfail('nn.functional.l1_loss', ''),
xfail('nn.functional.max_unpool3d', 'grad'),
xfail('nn.functional.smooth_l1_loss', ''),
xfail('nn.functional.max_unpool2d', 'grad'),
xfail('nn.functional.soft_margin_loss', ''),
xfail('nn.functional.binary_cross_entropy_with_logits', ''),
xfail('nn.functional.max_unpool1d', 'grad'),
xfail('nn.functional.embedding', ''),
xfail('lu_unpack'),
xfail('nn.functional.glu'),
xfail('nn.functional.bilinear'), # trilinear doesn't have batching rule
}))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
def test():
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple([*arg_values, *kwarg_values])
fn, args = get_jvp_variant_primals_tangents(op, sample)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn, args, {}, opinfo=op, compute_loop_out=False):
pass
check_vmap_fallback(self, test, op, dry_run=False)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps('TestOperators', 'test_vmapvjp_has_batch_rule', vmapvjp_fail.union({
xfail('view_as_complex'),
xfail('cholesky'),
xfail('complex'),
xfail('copysign'),
xfail('cummax'),
xfail('cummin'),
xfail('cumprod'),
xfail('eig'),
xfail('nansum'),
xfail('nanmean'),
xfail('fmin'),
xfail('fmax'),
xfail('special.log_ndtr'),
xfail('index_copy'),
xfail('index_fill'),
xfail('linalg.cholesky'),
xfail('linalg.cholesky_ex'),
xfail('linalg.det'),
xfail('linalg.eig'),
xfail('linalg.eigh'),
xfail('linalg.eigvals'),
xfail('linalg.householder_product'),
xfail('linalg.lstsq', ''),
xfail('linalg.lstsq', 'grad_oriented'),
xfail('linalg.pinv'),
xfail('linalg.qr'),
xfail('linalg.pinv', 'hermitian'),
xfail('linalg.slogdet'),
xfail('linalg.solve'),
xfail('logdet'),
xfail('lu'),
xfail('lu_solve'),
xfail('lu_unpack'),
xfail('masked_fill'),
xfail('masked_scatter'),
xfail('masked_select'),
xfail('matrix_exp'),
xfail('nanquantile'),
xfail('pinverse'),
xfail('prod'),
xfail('put'),
xfail('quantile'),
xfail('renorm'),
xfail('symeig'),
xfail('take'),
xfail('tensor_split'),
xfail('to_sparse'),
xfail('unfold'),
xfail('vdot'),
xfail('nn.functional.dropout'),
xfail('_masked.prod'),
xfail('fft.ihfft2'),
xfail('fft.ihfftn'),
xfail('cross'),
xfail('linalg.cross'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('nn.functional.huber_loss'),
xfail('nn.functional.poisson_nll_loss'),
xfail('nn.functional.bilinear'),
xfail('nn.functional.fractional_max_pool3d'),
xfail('as_strided'),
xfail('linalg.solve_triangular'),
xfail('stft'),
xfail('nn.functional.rrelu'),
xfail('nn.functional.embedding_bag'),
xfail('nn.functional.max_pool3d'),
xfail('istft'),
xfail('nn.functional.fractional_max_pool2d'),
xfail('linalg.tensorsolve'),
xfail('linalg.lu_factor', ''),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('nn.functional.kl_div', ''),
xfail('pca_lowrank', ''),
xfail('nn.functional.dropout2d', ''),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('svd_lowrank', ''),
xfail('linalg.lu_factor_ex', ''),
xfail('nn.functional.max_unpool2d', ''),
xfail('nn.functional.multi_margin_loss', ''),
xfail('nn.functional.multilabel_margin_loss', ''),
xfail('nn.functional.pdist', ''),
xfail('nn.functional.smooth_l1_loss', ''),
xfail('scatter_reduce', 'prod'),
xfail('scatter_reduce', 'amax'),
xfail('nn.functional.max_unpool1d', ''),
xfail('nn.functional.max_unpool3d', ''),
xfail('scatter_reduce', 'sum'),
xfail('scatter_reduce', 'mean'),
xfail('nn.functional.max_unpool3d', 'grad'),
xfail('nn.functional.soft_margin_loss', ''),
xfail('scatter_reduce', 'amin'),
xfail('nn.functional.max_unpool1d', 'grad'),
xfail('nn.functional.l1_loss', ''),
xfail('nn.functional.max_unpool2d', 'grad'),
xfail('qr'),
}))
def test_vmapvjp_has_batch_rule(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
def test():
for sample in samples:
cotangents = get_sample_cotangents(op, sample)
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn, args, {}, opinfo=op, compute_loop_out=False):
pass
for a_op in op.aliases:
fn, args = get_vjp_fn_and_args_with_cotangents(a_op, sample, cotangents)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn, args, {}, opinfo=op, compute_loop_out=False):
pass
check_vmap_fallback(self, test, op, dry_run=False)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_vjpvmap', vjp_fail.union({
skip('bernoulli', ''), # vjpvmap testing can't handle randomness
skip('normal', ''), # vjpvmap testing can't handle randomness
skip('normal', 'number_mean'), # vjpvmap testing can't handle randomness
# fallback path doesn't work
# All of the following are bugs and need to be fixed
xfail('__getitem__', ''),
xfail('index_put', ''),
xfail('matrix_exp'),
xfail('view_as_complex'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('masked_select'),
skip('nn.functional.fractional_max_pool3d'), # generator works on cpu, fails on cuda
xfail('__rpow__'), # https://github.com/pytorch/functorch/issues/617
xfail('as_strided'),
skip('nn.functional.fractional_max_pool2d'), # generator works on cpu, fails on cuda
xfail('column_stack', ''),
xfail('nn.functional.dropout2d', ''),
xfail('svd_lowrank', ''),
xfail('pca_lowrank', ''),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('clamp'),
# something weird happening with channels_last
xfail('bfloat16'),
xfail('double'),
xfail('float'),
xfail('half'),
}))
def test_vjpvmap(self, device, dtype, op):
# NB: there is no vjpvmap_has_batch_rule test because that is almost
# certainly redundant with the vmap_has_batch_rule test in test_vmap.py
# one-off skip
if op.name == 'nn.functional.dropout':
self.skipTest("Skipped!")
if not op.supports_autograd:
# If the op doesn't support autograd, vmap(op) won't either
self.skipTest("Skipped! Autograd not supported.")
return
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
batch_norm_fns = ("nn.functional.batch_norm", "nn.functional.instance_norm") # instance norm calls batch norm
is_batch_norm = op.name in batch_norm_fns
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = get_exhaustive_batched_inputs(args, kwargs, for_batch_norm=is_batch_norm)
for batched_args, in_dims, kwargs in generator:
vmapped_op = vmap(op, in_dims)
fn, primals = normalize_op_input_output2(vmapped_op, batched_args, kwargs,
sample.output_process_fn_grad)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
_, vjp_fn = vjp(fn, *primals)
result_vjps = vjp_fn(cotangents)
_, vjp_fn = ref_vjp(fn, *primals)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
def _compare_jacobians_of_vjp(self, fn, cotangents_and_primals, argnums=None, atol_rtol=None):
if argnums is None:
argnums = tuple(range(len(cotangents_and_primals)))
def get_vjp(cotangents, *primals):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
jacobian_jvp = jacfwd(get_vjp, argnums)(*cotangents_and_primals)
jacobian_vjp = jacrev(get_vjp, argnums)(*cotangents_and_primals)
# For dtype changing operations, the jacobians have different dtype.
jacobian_jvp = tree_map(lambda x: x.to(torch.float), jacobian_jvp)
jacobian_vjp = tree_map(lambda x: x.to(torch.float), jacobian_vjp)
if atol_rtol is not None:
(atol, rtol) = atol_rtol
self.assertEqual(jacobian_jvp, jacobian_vjp, atol=atol, rtol=rtol)
else:
self.assertEqual(jacobian_jvp, jacobian_vjp)
@ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@skipOps('TestOperators', 'test_jvpvjp', vjp_fail.union({
# These are weirdly non-deterministic
skip('nn.functional.fractional_max_pool2d'), # Random
skip('nn.functional.fractional_max_pool3d'), # Random
# RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor,
# this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3].
xfail('normal', ''),
xfail('_masked.amax', ''),
xfail('_masked.amin', ''),
xfail('_masked.log_softmax', ''),
xfail('_masked.softmax', ''),
xfail('_masked.softmin', ''),
xfail('amax', ''),
xfail('amin', ''),
xfail('cdist', ''),
xfail('cholesky', ''),
xfail('eig', ''),
xfail('linalg.det', ''),
xfail('linalg.matrix_norm', ''),
xfail('linalg.slogdet', ''),
xfail('logcumsumexp', ''),
xfail('logdet', ''),
xfail('nanmean', ''),
xfail('nansum', ''),
xfail('nn.functional.batch_norm', ''),
xfail('nn.functional.batch_norm', 'without_cudnn', device_type='cuda'),
xfail('nn.functional.embedding'),
xfail('nn.functional.embedding', 'functorch'),
xfail('nn.functional.embedding_bag', ''),
xfail('nn.functional.grid_sample', ''),
xfail('nn.functional.hardsigmoid', ''),
xfail('nn.functional.huber_loss', ''),
xfail('nn.functional.instance_norm', ''),
xfail('nn.functional.logsigmoid', ''),
xfail('nn.functional.pad', 'circular'),
xfail('nn.functional.softmin', ''),
xfail('nn.functional.softmin', 'with_dtype'),
xfail('renorm', ''),
xfail('std_mean', ''),
xfail('symeig', ''),
xfail('var_mean', ''),
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
xfail('nn.functional.kl_div', ''),
xfail('pca_lowrank', ''),
xfail('nn.functional.dropout2d', ''),
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
xfail('svd_lowrank', ''),
xfail('nn.functional.multilabel_margin_loss', ''),
xfail('nn.functional.multilabel_soft_margin_loss', ''),
xfail('scatter_reduce', 'amax'),
xfail('scatter_reduce', 'amin'),
xfail('nn.functional.soft_margin_loss', ''),
xfail('nn.functional.pdist', ''),
xfail('scatter_reduce', 'sum'),
xfail('nn.functional.multi_margin_loss', ''),
xfail('scatter_reduce', 'mean'),
xfail('scatter_reduce', 'prod'),
skip('linalg.householder_product', '', device_type='cuda'), # flaky, I'm not sure why
xfail('nn.functional.binary_cross_entropy_with_logits'),
}))
def test_jvpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
fn, primals = normalize_op_input_output(op, sample)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
primals_tangents = tree_map(lambda x: torch.randn_like(x), primals)
cotangents_tangents = tree_map(lambda x: torch.randn_like(x), cotangents)
if isinstance(primals[0], torch.Tensor) and primals[0].numel() == 0:
# typically the first primal arg is the input. If the input has no elements, we will typically run
# into an issue of "Expected Tensor but got None"
continue
def push_vjp(primals, cotangents):
_, vjp_fn = vjp(fn, *primals)
return vjp_fn(cotangents)
result = jvp(push_vjp, (primals, cotangents), (primals_tangents, cotangents_tangents))
self.assertEqual(len(result), 2)
def tree_map2(fn, first, second):
flat_first, spec_first = tree_flatten(first)
flat_second, spec_second = tree_flatten(second)
assert spec_first == spec_second
flat_result = [fn(f, s) for f, s in zip(flat_first, flat_second)]
return tree_unflatten(flat_result, spec_first)
def reference(primals, cotangents, primals_tangents, cotangents_tangents):
with fwAD.dual_level():
primal_duals = tree_map2(fwAD.make_dual, primals, primals_tangents)
_, vjp_fn = ref_vjp(fn, *primal_duals)
cotangent_duals = tree_map2(fwAD.make_dual, cotangents, cotangents_tangents)
result = vjp_fn(cotangent_duals)
flat_result, spec = tree_flatten(result)
primals_out, tangents_out = zip(*[fwAD.unpack_dual(r) for r in flat_result])
tangents_out = [t if t is not None else torch.zeros_like(p)
for p, t in zip(primals_out, tangents_out)]
expected = (tree_unflatten(primals_out, spec), tree_unflatten(tangents_out, spec))
return expected
# HACK: obviously pytorch should also have the same coverage
# For things that do have the same coverage, we test that jvp x vjp
# are the same between PyTorch and functorch. For things that don't,
# we check that jacfwd(vjp) and jacrev(vjp) are the same. This results
# in slower tests.
FUNCTORCH_HAS_FORMULA_BUT_NOT_PYTORCH = {
'nn.functional.nll_loss',
'softmax',
'log_softmax',
'nn.functional.cross_entropy',
'nn.functional.layer_norm'
}
if op.name in FUNCTORCH_HAS_FORMULA_BUT_NOT_PYTORCH:
self.assertFalse(op.supports_fwgrad_bwgrad,
f"{op.name} now supports forward over reverse without a decomposition. " +
"Please remove the decomposition version")
def is_differentiable(t):
return isinstance(t, torch.Tensor) and t.dtype == torch.float32
args = (cotangents, *primals)
if op.name == 'nn.functional.binary_cross_entropy':
argnums = (0, 1) # targets is float32 but isn't differentiable
atol_rtol = 1.5e-4, 1.3e-06
else:
argnums = tuple(i for i in range(len(args)) if is_differentiable(args[i]))
atol_rtol = None
self._compare_jacobians_of_vjp(fn, args, argnums, atol_rtol)
else:
expected = reference(primals, cotangents, primals_tangents, cotangents_tangents)
self.assertEqual(result, expected)
    def _make_extremal_inputs(self, shape, device):
        # Produce tensors of the given shape filled with extreme values
        # (-1000, 0 and 1000); a None shape yields a single None placeholder.
        if shape is None:
            return (None,)
return (
torch.full(shape, -1000., device=device),
torch.zeros(shape, device=device),
torch.full(shape, 1000., device=device),
)
    def _arg_and_kwarg_options(self, args_options, kwargs_options):
        # Cartesian product: one choice from each entry of args_options plus one
        # kwargs dict drawn from kwargs_options.
        return itertools.product(*args_options, kwargs_options)
def test_extremal_numerics_nll_loss(self, device):
N, C = 3, 4
d1, d2, d3 = 5, 6, 7
shapes = (
((N, C), (N,), (C,)),
((N, C), (N,), None),
((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, d1, d2, d3), None),
)
kwargs_options = ({'ignore_index': 0, 'reduction': 'mean'}, {'reduction': 'sum'}, {'reduction': 'none'}, {})
for input_shape, target_shape, weight_shape in shapes:
input_options = self._make_extremal_inputs(input_shape, device)
for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
if weight_shape is None:
weight = None
else:
weight = torch.randn(weight_shape, device=device)
target = torch.randint(0, C, target_shape, device=device)
target[0] = 1 # since we're ignoring index 0, at least one element must be non-zero
fn = functools.partial(torch.nn.functional.nll_loss, target=target, weight=weight, **kwargs)
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_l1_loss(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'reduction': 'sum'}, {'reduction': 'none'}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
target_options = self._make_extremal_inputs(shape, device)
            for input, target, kwargs in self._arg_and_kwarg_options((input_options, target_options), kwargs_options):
                fn = functools.partial(torch.nn.functional.l1_loss, **kwargs)
                result = fn(input, target)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input, target))
def test_extremal_numerics_mse_loss(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'reduction': 'sum'}, {'reduction': 'none'}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
target_options = self._make_extremal_inputs(shape, device)
            for input, target, kwargs in self._arg_and_kwarg_options((input_options, target_options), kwargs_options):
                fn = functools.partial(torch.nn.functional.mse_loss, **kwargs)
                result = fn(input, target)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input, target))
def test_extremal_numerics_softmax(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'dim': 1}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
            for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
                fn = functools.partial(torch.nn.functional.softmax, **kwargs)
                result = fn(input)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_log_softmax(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
kwargs_options = ({'dim': 1}, {})
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
            for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
                fn = functools.partial(torch.nn.functional.log_softmax, **kwargs)
                result = fn(input)
                cotangents = torch.randn_like(result, device=device)
                self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_cross_entropy(self, device):
N, C = 3, 4
d1, d2, d3 = 5, 6, 7
shapes = (
((N, C), (N,), (C,)),
((N, C), (N,), None),
((N, C), (N, C), (C,)),
((N, C), (N, C), None),
((C,), (), (C,)),
((C,), (), None),
((C,), (C,), (C,)),
((C,), (C,), None),
((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, d1, d2, d3), None),
((N, C, d1, d2, d3), (N, C, d1, d2, d3), (C,)),
((N, C, d1, d2, d3), (N, C, d1, d2, d3), None),
)
for input_shape, target_shape, weight_shape in shapes:
input_options = self._make_extremal_inputs(input_shape, device)
kwargs_options = [{'reduction': 'sum'}, {'reduction': 'none'}, {}]
if input_shape != target_shape:
kwargs_options.append({'ignore_index': 0, 'reduction': 'mean'})
for input, kwargs in self._arg_and_kwarg_options((input_options,), kwargs_options):
if weight_shape is None:
weight = None
else:
weight = torch.randn(weight_shape, device=device)
if input_shape == target_shape:
target = torch.rand(target_shape, device=device)
elif len(target_shape) == 0:
target = torch.tensor(1, device=device) # must be non-zero since ignore_index may be 0
else:
target = torch.randint(0, C, target_shape, device=device)
fn = functools.partial(torch.nn.functional.cross_entropy, target=target, weight=weight, **kwargs)
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input), atol_rtol=(1e-4, 1e-5))
def test_extremal_numerics_binary_cross_entropy(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
for shape in shapes:
weight_options = self._make_extremal_inputs(shape, device)
kwargs_options = [{'reduction': 'sum'}, {'reduction': 'none'}, {}]
for weight, kwargs in self._arg_and_kwarg_options((weight_options,), kwargs_options):
input = torch.rand(shape, device=device)
target = torch.rand(shape, device=device)
fn = functools.partial(torch.nn.functional.binary_cross_entropy, target=target, weight=weight, **kwargs)
result = fn(input)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input), atol_rtol=(1e-4, 2e-5))
def test_extremal_numerics_layer_norm(self, device):
N, C, H, W = 3, 4, 5, 6
shapes = ((N, C), (N, C, H), (N, C, H, W))
for shape in shapes:
input_options = self._make_extremal_inputs(shape, device)
normalized_shape = shape[1:]
weight_options = self._make_extremal_inputs(normalized_shape, device)
bias_options = self._make_extremal_inputs(normalized_shape, device)
for input, bias, weight in self._arg_and_kwarg_options((input_options, bias_options, weight_options), ()):
def fn(input, weight, bias):
return torch.nn.functional.layer_norm(input, normalized_shape, weight=weight, bias=bias)
result = fn(input, weight, bias)
cotangents = torch.randn_like(result, device=device)
self._compare_jacobians_of_vjp(fn, (cotangents, input, weight, bias))
@ops(filter(lambda op: op.name == "nn.functional.group_norm", functorch_lagging_op_db + additional_op_db),
allowed_dtypes=(torch.float32, torch.double)) # TODO: generalize
def test_group_norm_backward(self, device, dtype, op):
# hacky, only works since no group norm inputs can be scalars
def was_skipped_from_batched_tensors(batched_out, batch_size):
return batched_out.shape == (batch_size,) and all(tuple(e == 1 for e in batched_out))
sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
for sample_input in sample_inputs:
cotangents = get_sample_cotangents(op, sample_input)
f, args = get_autograd_fn_and_args_with_cotangents(op, sample_input, cotangents)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(f, args, {}, opinfo=op):
if all(was_skipped_from_batched_tensors(bo, lo.shape[0]) for (bo, lo) in zip(batched_out, loop_out)):
continue # we weren't able to use the batched tensor in autograd.grad
self.assertEqual(loop_out, batched_out)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
if __name__ == '__main__':
run_tests()
|
Recycled/collector/stock_fina_tushare.py | lifg2000/StockAnalysisSystem | 138 | 12737068 | <reponame>lifg2000/StockAnalysisSystem
import tushare as ts
import config
class StockFinancialDataFromTuShare:
def __init__(self):
ts.set_token(config.TS_TOKEN)
self.__pro = ts.pro_api()
def init(self) -> bool:
pass
def inited(self) -> bool:
pass
    # Validate whether this collector is still valid.
def validate(self) -> bool:
pass
    # Fetch data from the internet.
def fetch_data(self, **kw) -> bool:
pass
    # Automatically check for and update data in the DB. Depends on the collector's implementation.
def check_update(self, **kw) -> bool:
pass
# Force update all data in DB.
def force_update(self, **kw) -> bool:
pass
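# A minimal sketch of what fetch_data() could call, assuming the tushare "pro"
# financial-statement endpoints income / balancesheet / cashflow and their
# ts_code / period keyword arguments; verify them against the tushare docs for
# the installed version before relying on this.
def _fetch_financials_sketch(pro, ts_code: str, period: str):
    """Return (income, balance sheet, cash flow) DataFrames for one stock and period."""
    income = pro.income(ts_code=ts_code, period=period)
    balance = pro.balancesheet(ts_code=ts_code, period=period)
    cashflow = pro.cashflow(ts_code=ts_code, period=period)
    return income, balance, cashflow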
|
tests/testapp/forms.py | cursive-works/wagtailmedia | 176 | 12737069 | <reponame>cursive-works/wagtailmedia
from django.forms import ModelForm
from django.forms.widgets import Widget
class OverridenWidget(Widget):
pass
class AlternateMediaForm(ModelForm):
class Meta:
widgets = {
"tags": OverridenWidget,
"file": OverridenWidget,
"thumbnail": OverridenWidget,
}
|
software/fpga/ov3/ovhw/leds.py | twam/ov_ftdi | 247 | 12737085 | <gh_stars>100-1000
from migen import *
from misoc.interconnect.csr import AutoCSR, CSRStorage
from itertools import zip_longest
# Basic programmable LED module
class LED_outputs(Module, AutoCSR):
def __init__(self, leds_raw, leds_muxes=None, active=1):
"""
leds_raw: output IOs for the LEDs
leds_muxes: internal digital signals that could feed a LED
"""
leds = Signal(len(leds_raw))
# Register containing the desired LED status
self._out = CSRStorage(len(leds), atomic_write=True)
# For each LED, we generate a MUX register.
        # The MUX register can connect either the corresponding bit of the '_out'
        # register or one of the signals supplied via leds_muxes.
if leds_muxes:
assert len(leds_muxes) == len(leds)
for n in range(len(leds)):
name = "mux_%d" % n
attr = CSRStorage(8, atomic_write=True, name=name)
setattr(self, "_%s" % name, attr)
mux_vals = [self._out.storage[n]]
if leds_muxes[n]:
mux_vals.extend(leds_muxes[n])
cases = {k: leds[n].eq(v) for k, v in enumerate(mux_vals)}
self.comb += [
leds[n].eq(0),
Case(attr.storage, cases)
]
else:
self.comb += [
leds.eq(self._out.storage),
]
self.comb += [
leds_raw.eq(leds if active else ~leds)
]
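# A minimal usage sketch, assuming a board with three LEDs; the Signals below are
# stand-ins for real platform pins and internal status lines that would normally
# come from platform.request() and other cores.
if __name__ == "__main__":
    demo_pads = Signal(3)      # stand-in for the board's LED output pins
    demo_activity = Signal()   # stand-in for an internal activity strobe
    # LED 0 can select either its CSR bit or the activity strobe; LEDs 1-2 are CSR-only.
    demo = LED_outputs(demo_pads, leds_muxes=[[demo_activity], None, None])
    print(demo)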
|
benchmarks/django-workload/uwsgi/files/django-workload/django_workload/urls.py | jonasbn/cloudsuite | 103 | 12737107 | <reponame>jonasbn/cloudsuite<gh_stars>100-1000
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^feed_timeline$', views.feed_timeline, name='feed_timeline'),
url(r'^timeline$', views.timeline, name='timeline'),
url(r'^bundle_tray$', views.bundle_tray, name='bundle_tray'),
url(r'^inbox$', views.inbox, name='inbox'),
url(r'^seen$', views.seen, name='seen'),
]
|
sbuild/sbuild/__init__.py | IMULMUL/static-binaries | 2,049 | 12737136 | <gh_stars>1000+
__author__ = '<NAME> <<EMAIL>>'
__version__ = '0.0.1'
|
tlslite/utils/python_eddsakey.py | tomato42/tlslite-1 | 121 | 12737145 | # Author <NAME>, copyright 2021
from .eddsakey import EdDSAKey
from ecdsa.keys import BadSignatureError
from ecdsa.der import UnexpectedDER
from .cryptomath import numBits
from .compat import compatHMAC
class Python_EdDSAKey(EdDSAKey):
"""
Concrete implementation of EdDSA object backed by python-ecdsa.
    Object that uses the common, abstract API of asymmetric keys
    together with the python-ecdsa library for the cryptographic operations.
    :vartype public_key: VerifyingKey
    :ivar public_key: python-ecdsa object for verifying EdDSA signatures; if
        `private_key` is set, it should match it (i.e. be able to verify
        signatures created by it)
    :vartype private_key: SigningKey
    :ivar private_key: python-ecdsa object for creating EdDSA signatures
    :vartype key_type: str
    :ivar key_type: type of asymmetric algorithm used by the keys - for these
        objects it is either "Ed25519" or "Ed448"
"""
def __init__(self, public_key, private_key=None):
if not public_key and not private_key:
raise ValueError("at least one key must be provided")
if not public_key:
public_key = private_key.verifying_key
self.curve_name = public_key.curve.name
self.private_key = private_key
self.public_key = public_key
self.key_type = self.curve_name
def __len__(self):
return numBits(self.public_key.curve.order)
def hasPrivateKey(self):
return bool(self.private_key)
def acceptsPassword(self):
return False
@staticmethod
def generate(bits):
raise NotImplementedError()
def _hashAndSign(self, data):
return self.private_key.sign_deterministic(compatHMAC(data))
def _hashAndVerify(self, signature, data):
try:
return self.public_key.verify(compatHMAC(signature),
compatHMAC(data))
# https://github.com/warner/python-ecdsa/issues/114
except (BadSignatureError, UnexpectedDER, IndexError, AssertionError):
return False
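# A minimal usage sketch, assuming python-ecdsa is new enough (>= 0.17) to ship the
# Ed25519 curve object; everything else only exercises the methods defined above.
if __name__ == "__main__":
    from ecdsa import SigningKey
    from ecdsa.curves import Ed25519
    signing_key = SigningKey.generate(curve=Ed25519)
    key = Python_EdDSAKey(None, private_key=signing_key)
    signature = key._hashAndSign(b"example message")
    assert key._hashAndVerify(signature, b"example message")
    print("key bits:", len(key), "has private key:", key.hasPrivateKey())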
|
suplemon/linelight/php.py | johnmbaughman/suplemon | 912 | 12737176 | <gh_stars>100-1000
from suplemon.linelight.color_map import color_map
class Syntax:
def get_comment(self):
return ("//", "")
def get_color(self, raw_line):
color = color_map["white"]
line = raw_line.strip()
keywords = ("if", "else", "finally", "try", "catch", "foreach",
"while", "continue", "pass", "break")
if line.startswith(("include", "require")):
color = color_map["blue"]
elif line.startswith(("class", "public", "private", "function")):
color = color_map["green"]
elif line.startswith("def"):
color = color_map["cyan"]
elif line.startswith("return"):
color = color_map["red"]
elif line.startswith("$"):
color = color_map["cyan"]
elif line.startswith(("#", "//", "/*", "*/")):
color = color_map["magenta"]
elif line.startswith(keywords):
color = color_map["yellow"]
return color
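# A minimal usage sketch; the sample lines are illustrative only and the printed
# values are whatever color_map maps the chosen names to.
if __name__ == "__main__":
    syntax = Syntax()
    print(syntax.get_comment())                   # ("//", "")
    print(syntax.get_color("$total = 0;"))        # color_map["cyan"]
    print(syntax.get_color("// TODO: refactor"))  # color_map["magenta"]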
|
amazon_paapi/models/variations_result.py | frenners/python-amazon-paapi | 121 | 12737189 | <filename>amazon_paapi/models/variations_result.py
from typing import List
from .item_result import Item
from ..sdk.models import VariationsResult, VariationSummary
class ApiPrice:
amount: float
currency: str
display_amount: str
class ApiVariationDimension:
display_name: str
name: str
values: List[str]
class ApiVariationPrice:
highest_price: ApiPrice
lowest_price: ApiPrice
class ApiVariationSummary(VariationSummary):
page_count: int
price: ApiVariationPrice
variation_count: int
variation_dimensions: List[ApiVariationDimension]
class VariationsResult(VariationsResult):
items: List[Item]
variation_summary: ApiVariationSummary
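# A minimal access sketch; `response` is assumed to be a VariationsResult returned
# by the API client, and only the fields declared above are touched.
def _print_variation_summary_sketch(response: VariationsResult) -> None:
    summary = response.variation_summary
    print(summary.variation_count, "variations across", summary.page_count, "pages")
    print("lowest price:", summary.price.lowest_price.display_amount)
    for dimension in summary.variation_dimensions:
        print(dimension.display_name + ":", ", ".join(dimension.values))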
|
vk_api__examples/wall_post__images.py | gil9red/SimplePyScripts | 117 | 12737192 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
The script searches the internet for images and posts them to a vk.com user's wall.
"""
import sys
import random
from urllib.request import urlopen
from typing import List
from vk_api.upload import VkUpload
from root_config import DIR
from root_common import get_vk_session
# Allow importing yandex_search_img.py
sys.path.append(str(DIR.parent))
from yandex_search_img import get_images
def get_attachments(upload: VkUpload, urls: List[str]) -> str:
rs = upload.photo_wall([urlopen(url) for url in urls])
return ','.join(f"photo{photo['owner_id']}_{photo['id']}" for photo in rs)
OWNER_ID = None
vk_session = get_vk_session()
vk = vk_session.get_api()
upload = VkUpload(vk_session)
text = 'Котята'
urls = get_images(text)
# "Перемешаем" элементы списка
random.shuffle(urls)
# Post a message on the user's wall (owner_id is the user's id)
# If owner_id is not specified, the message is posted to your own wall
rs = vk.wall.post(
owner_id=OWNER_ID,
message=text + ' 3 шт.',
attachments=get_attachments(upload, urls[:3]),
)
print('rs:', rs)
rs = vk.wall.post(
owner_id=OWNER_ID,
message=text + ' 1 шт.',
attachments=get_attachments(upload, urls[:1]),
)
print('rs:', rs)
|