# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
# Import core packages
import os
# Import Flask
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Inject Flask magic
app = Flask(__name__)
# Load configuration
app.config.from_object('app.config.Config')
# Construct the DB Object (SQLAlchemy interface)
db = SQLAlchemy(app)
# Enable migrations for our application
Migrate(app, db)
# Import routing to render the pages
from app import views, models
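# Illustrative sketch only: 'app.config.Config' loaded above normally lives in
# app/config.py and is not shown in this file. The attributes below are a guess
# at a minimal configuration (SQLite database plus secret key); the real
# project settings may differ.
class _ExampleConfig(object):
    SECRET_KEY = os.getenv('SECRET_KEY', 'change-me')
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(os.path.dirname(__file__), 'db.sqlite3')
    SQLALCHEMY_TRACK_MODIFICATIONS = False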
|
import click
from koapy.cli.utils.credentials import get_credentials
from koapy.cli.utils.verbose_option import verbose_option
@click.group(short_help="Update openapi module and metadata.")
def update():
pass
@update.command(short_help="Update openapi TR metadata.")
@verbose_option()
def trinfo():
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusTrInfo import (
KiwoomOpenApiPlusTrInfo,
)
KiwoomOpenApiPlusTrInfo.dump_trinfo_by_code()
@update.command(short_help="Update openapi realtype metadata.")
@verbose_option()
def realtype():
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusRealType import (
KiwoomOpenApiPlusRealType,
)
KiwoomOpenApiPlusRealType.dump_realtype_by_desc()
@update.command(short_help="Update openapi module version.")
@click.option(
"-i", "--interactive", is_flag=True, help="Put login information with prompts."
)
@verbose_option(default=5, show_default=True)
def openapi(interactive):
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusVersionUpdater import (
KiwoomOpenApiPlusVersionUpdater,
)
credentials = get_credentials(interactive)
updater = KiwoomOpenApiPlusVersionUpdater(credentials)
updater.update_version_if_necessary()
@update.command(short_help="Update gRPC stub files by compiling proto files.")
def proto():
from koapy.backend.kiwoom_open_api_plus.grpc.tools.compile_proto import (
compile_proto,
)
compile_proto()
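# Illustrative usage sketch (not part of the original module): the "update"
# group defined above is normally mounted on koapy's main CLI, but it can be
# exercised on its own, e.g. with click's test runner.
if __name__ == "__main__":
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(update, ["--help"])
    print(result.output)  # lists the trinfo / realtype / openapi / proto subcommands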
|
from collections import defaultdict
import tensorflow as tf
import numpy as np
import csv
import hashlib
from pathlib import Path
import re
FOLDER_LOCATION = 8
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def create_tf_example(predictions, raw_img, tag_map):
filename = predictions[0][0]
height = int(predictions[0][6])
    width = int(predictions[0][7])  # width is assumed to be in column 7 (column 6 holds the height)
key = hashlib.sha256(raw_img).hexdigest()
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
for prediction in predictions:
ymin.append(float(prediction[4]))
xmin.append(float(prediction[2]))
ymax.append(float(prediction[5]))
xmax.append(float(prediction[3]))
tag_name = prediction[1]
classes_text.append(tag_name.encode('utf8'))
classes.append(tag_map[tag_name])
truncated.append(0)
poses.append("Unspecified".encode('utf8'))
difficult_obj.append(0)
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature([height]),
'image/width': int64_feature([width]),
'image/filename': bytes_feature([
filename.encode('utf8')]),
'image/source_id': bytes_feature([
filename.encode('utf8')]),
'image/key/sha256': bytes_feature([key.encode('utf8')]),
'image/encoded': bytes_feature([raw_img]),
'image/format': bytes_feature(['jpeg'.encode('utf8')]),
'image/object/bbox/xmin': float_feature(xmin),
'image/object/bbox/xmax': float_feature(xmax),
'image/object/bbox/ymin': float_feature(ymin),
'image/object/bbox/ymax': float_feature(ymax),
'image/object/class/text': bytes_feature(classes_text),
'image/object/class/label': int64_feature(classes),
'image/object/difficult': int64_feature(difficult_obj),
'image/object/truncated': int64_feature(truncated),
'image/object/view': bytes_feature(poses),
}))
return example
def create_tf_record(pred_file, record_file, image_loc, user_folders, split_names=["train","val"],
split_percent=[.7,.3], tag_names = ["stamp"], test_file=None):
record_file = Path(record_file)
with open(pred_file, 'r') as file:
reader = csv.reader(file)
next(reader, None)
all_preds = list(reader)
all_files = defaultdict(list)
if test_file is not None:
with open(test_file, 'r') as file:
reader = csv.reader(file)
next(reader, None)
all_test = set((row[0] for row in reader))
for row in all_preds:
if row[0] not in all_test:
all_files[row[0]].append(row)
else:
for row in all_preds:
all_files[row[0]].append(row)
rand_list = list(all_files)
np.random.shuffle(rand_list)
split_percent = np.cumsum(split_percent)
split_percent = split_percent[:-1]
split_percent *= len(rand_list)
    split_percent = split_percent.round().astype(int)  # np.int is deprecated; builtin int works across NumPy versions
split_preds = np.split(rand_list,split_percent)
tag_map = {name: index for index, name in enumerate(tag_names, 1)}
for name, filenames in zip(split_names, split_preds):
writer = tf.python_io.TFRecordWriter("{}_{}".format(record_file.with_suffix(''), name) + record_file.suffix)
for filename in filenames:
predictions = all_files[filename]
if user_folders:
file_loc = str(Path(image_loc)/predictions[0][FOLDER_LOCATION]/filename)
else:
file_loc = str(Path(image_loc)/filename)
with open(file_loc, "rb") as img_file:
raw_img = img_file.read()
tf_example = create_tf_example(predictions, raw_img, tag_map)
writer.write(tf_example.SerializeToString())
writer.close()
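# Illustrative helper (not part of the original script): a quick way to count
# how many examples landed in each split, using the same TF1 python_io API as
# the writer above.
def count_tf_records(record_path):
    """Return the number of serialized examples in a TFRecord file."""
    return sum(1 for _ in tf.python_io.tf_record_iterator(str(record_path)))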
if __name__ == "__main__":
#select_jsons(r"C:\Users\t-yapand\Desktop\GAUCC",r"C:\Users\t-yapand\Desktop\GAUCC.json",True,r"C:\Users\t-yapand\Desktop\GAUCC1_1533070038606.csv")
from azure.storage.blob import BlockBlobService
import sys
import os
# Allow us to import utils
config_dir = str(Path.cwd().parent / "utils")
if config_dir not in sys.path:
sys.path.append(config_dir)
from config import Config
if len(sys.argv)<2:
raise ValueError("Need to specify config file")
config_file = Config.parse_file(sys.argv[1])
block_blob_service = BlockBlobService(account_name=config_file["AZURE_STORAGE_ACCOUNT"], account_key=config_file["AZURE_STORAGE_KEY"])
container_name = config_file["label_container_name"]
file_date = [(blob.name, blob.properties.last_modified) for blob in block_blob_service.list_blobs(container_name) if re.match(r'tagged_(.*).csv', blob.name)]
if file_date:
block_blob_service.get_blob_to_path(container_name, max(file_date, key=lambda x:x[1])[0], config_file["tagged_output"])
else:
raise ValueError("No tagged data exists. Cannot train model without any tagged data.")
file_date = [(blob.name, blob.properties.last_modified) for blob in block_blob_service.list_blobs(container_name) if re.match(r'test_(.*).csv', blob.name)]
if file_date:
block_blob_service.get_blob_to_path(container_name, max(file_date, key=lambda x:x[1])[0], config_file["test_output"])
create_tf_record(config_file["tagged_output"],config_file["tf_record_location"],config_file["image_dir"],
config_file["user_folders"]=="True", tag_names=config_file["classes"].split(","), test_file=config_file["test_output"])
else:
create_tf_record(config_file["tagged_output"],config_file["tf_record_location"],config_file["image_dir"],
config_file["user_folders"]=="True", tag_names=config_file["classes"].split(","))
|
"""Coin change Problem"""
"""
EX:
Change for $51 using ($1, 2, 5, 10, 20)
Greedy Approach
Subtract largest bills
51 -20 = 31 - 20 = 11 - 10= 1 - 1= 0
20, 20, 10, 1 = 5 bills
What if bills to use were (3, 5, 7, 11)?
Smallest # bills for $13?
13 - 11 = 2 Can't go further
C dollars want to use 1 d dollar
C - d bills + 1 for using one d dollar.
EX: $10 and 2 dollar bills
min to make $10 -> min to make $8 + 1 ->
min to make $6 + 2 -> $4 + 3 -> $2 + 4 -> $0 + 5
= 5 $2 bills to make $10
Base Case:
C where is is some # of dollars.
Bills(0) = 0
Bills(C) = impossible if C < 0
Subproblem:
Bills(C) = Bills(C - d) + 1 if bills(C -d) is possible
Bills(C) = impossible if bills(C - d) is impossible
EX: C = 10, d=100 -> C-d = -90 impossible
EX:
C = 10
d = 2
bills(10)
= bills(8) + 1
= bills(6) + 1 + 1
= bills(4) + 1 + 1 + 1
= bills(2) + 1 + 1 + 1 + 1
= bills(0) + 1 + 1 + 1 + 1 + 1
= 5
bills[0] = 0
for c from 1 to C
if c - d >= 0 and bills[c - d] is not impossible:
bills[c] = bills[c-d] + 1
else:
bills[c] = impossible
return bills[C-1]
Extend to mutiple denominations
- if we want to see all possible combinations
then we need try all of them and get the min # bills.
- dn is some denomination min(C-dn)
-
let denom[] be an array of denominations
let bills[C] be smallest amount of bills to make C with denoms
Base Case:
bills[0] = 0
Subproblem
for c from 1 to C
bills[c] = impossible
for d in denom
if c-d >0 and bills[c-d] is not impossible
bill[c] = min(bills[c], bills[c-d] + 1)
EX:
denom = [3,4,5]
C = 7
bills[0] = 0
bills[1] = impossible (bills[1 -3], bills[1-4], bills[1-5] are
impossible
...
bills[C] = (bills[3] + 1 or bills[4] + 1)
"""
def min_number_of_bills(C, denom):
# base case
if C == 0:
return 0
bills = [0] * (C + 1)
for c in range(1, C + 1):
bills[c] = float('inf')
for d in denom:
if c - d >= 0 and bills[c - d] != float('inf'):
bills[c] = min(bills[c], bills[c - d] + 1)
return bills[C]
if __name__=="__main__":
print('C = 7, Expect 2 Returned ', min_number_of_bills(7, [3,4,5]))
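# Illustrative extension (function and variable names are assumptions, not part
# of the original): the same DP table can also record which denomination was
# chosen for each amount, so the actual bills can be reconstructed, not just counted.
def bills_used(C, denom):
    INF = float('inf')
    best = [0] + [INF] * C          # best[c] = fewest bills to make c
    choice = [None] * (C + 1)       # choice[c] = denomination picked for c
    for c in range(1, C + 1):
        for d in denom:
            if c - d >= 0 and best[c - d] + 1 < best[c]:
                best[c] = best[c - d] + 1
                choice[c] = d
    if best[C] == INF:
        return None                 # the amount cannot be made
    used, c = [], C
    while c > 0:
        used.append(choice[c])
        c -= choice[c]
    return used
if __name__ == "__main__":
    print('C = 7, bills used:', bills_used(7, [3, 4, 5]))  # e.g. [3, 4]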
|
from typing import Any
class BinaryNode:
value: Any
left_child: 'BinaryNode'
right_child: 'BinaryNode'
def __init__(self,val: Any):
self.value=val
self.left_child=None
self.right_child=None
def min(self):
if self.left_child!=None:
return self.left_child.min()
return self
def _contains(self, value: Any):
if self.value == value:
return True
elif self.value > value:
if self.left_child!=None:
return self.left_child._contains(value)
else:
return False
else:
if self.right_child!=None:
return self.right_child._contains(value)
else:
return False
def show(self,level):
if self.right_child!=None:
self.right_child.show(level+1)
print(' ' * 4 * level + '->', self.value)
if self.left_child!=None:
self.left_child.show(level+1)
class BinarySearchTree:
root: BinaryNode
    def __init__(self, node: 'BinaryNode'):
self.root=node
def insert(self, value: Any):
self.root= self._insert(self.root,value)
def _insert(self, node: BinaryNode, value: Any):
if value<node.value:
if node.left_child==None:
node.left_child=BinaryNode(value)
else:
self._insert(node.left_child,value)
else:
if node.right_child==None:
node.right_child=BinaryNode(value)
else:
self._insert(node.right_child,value)
return node
def insertlist(self, lista):
for element in lista:
self.insert(element)
def contains(self, value: Any):
return self.root._contains(value)
def remove(self, value: Any):
self.root= self._remove(self.root, value)
def _remove(self, node: BinaryNode, value: Any):
if node!=None:
if value == node.value:
                if (node.left_child is None) and (node.right_child is None):
return None
elif node.left_child==None:
return node.right_child
elif node.right_child==None:
return node.left_child
node.value=node.right_child.min().value
node.right_child=self._remove(node.right_child,node.value)
elif value < node.value:
node.left_child=self._remove(node.left_child,value)
else:
node.right_child=self._remove(node.right_child,value)
return node
def show(self):
print(self.root.value)
self._show_children('', self.root)
def _show(self, padding, has_sibling, node: BinaryNode):
both = '├── '
single = '└── '
if node:
if has_sibling:
arrow = both
new_padding = padding + '│ '
else:
arrow = single
new_padding = padding + ' '
val = str(node.value)
print(padding + arrow + val)
self._show_children(new_padding, node)
def _show_children(self, padding, node: BinaryNode):
has_right_child = node.right_child is not None
self._show(padding, has_right_child, node.left_child)
self._show(padding, False, node.right_child)
def type(x: Any):
print(str(x), end = ' ')
tree = BinarySearchTree(BinaryNode(5))
tree.insert(3)
tree.insert(8)
tree.insert(7)
tree.insert(9)
tree.insert(1)
tree.insert(4)
tree.insert(2)
tree.insert(6)
tree.show()
print("=========================================")
print(tree.root.min().value)
print(tree.root.right_child.min().value)
print(tree.contains(10))
print(tree.contains(7))
print("=========================================")
tree.insertlist([10,0])
tree.remove(5)
tree.show()
|
"""
INTERNAL FUNCTIONS FOR XViewMiddleware and ViewPanel
"""
import sys
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.loader import get_template
if sys.version_info[0] >= 3:
string_types = str
else:
string_types = basestring
def track_view_name(request, view_func):
if request.META.get("REMOTE_ADDR") in settings.INTERNAL_IPS or (
request.user.is_active and request.user.is_staff
):
view_name = f"{view_func.__module__}.{get_view_name(view_func)}"
request._xview = view_name
return view_name
def get_view_name(view_func):
if not hasattr(view_func, "__name__"):
# e.g. django.contrib.formtools.views.FormWizard object with __call__() method
return view_func.__class__.__name__
else:
return view_func.__name__
def get_used_view_name(request):
return getattr(request, "_xview", None)
def get_used_template(response):
"""
Get the template used in a TemplateResponse.
This returns a tuple of "active choice, all choices"
"""
if not hasattr(response, "template_name"):
return None, None
template = response.template_name
if template is None:
return None, None
if isinstance(template, (list, tuple)):
# See which template name was really used.
if len(template) == 1:
return template[0], None
else:
used_name = _get_used_template_name(template)
return used_name, template
elif isinstance(template, string_types):
# Single string
return template, None
else:
# Template object.
filename = _get_template_filename(template)
template_name = f"<template object from {filename}>" if filename else "<template object>"
return template_name, None
def _get_used_template_name(template_name_list):
"""
Find which template of the template_names is selected by the Django loader.
"""
for template_name in template_name_list:
try:
get_template(template_name)
return template_name
except TemplateDoesNotExist:
continue
def _get_template_filename(template):
    # With TEMPLATE_DEBUG = True, each node tracks its origin.
try:
return template.nodelist[0].origin[0].name
except (AttributeError, IndexError):
return None
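# Illustrative usage sketch (not part of this module): the helpers above are
# meant to be called from middleware hooks. The class below only shows the
# wiring and follows Django's standard middleware conventions; the real
# XViewMiddleware in the surrounding project may differ.
class ExampleXViewMiddleware(object):
    def __init__(self, get_response):
        self.get_response = get_response
    def __call__(self, request):
        response = self.get_response(request)
        used_template, _choices = get_used_template(response)
        if used_template:
            response["X-View-Template"] = used_template  # expose which template rendered
        return response
    def process_view(self, request, view_func, view_args, view_kwargs):
        track_view_name(request, view_func)  # records "module.view" on the request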
|
def leiaInt(msg):
    """Prompt until the user types a valid integer; returns 0 on Ctrl+C."""
    while True:
        try:
            n = int(input(msg))
        except (ValueError, TypeError):
            print('\033[31mERROR: please enter a valid integer.\033[m')
            continue
        except KeyboardInterrupt:
            print('\033[31mThe user chose not to enter a number.\033[m')
            return 0
        else:
            return n
def linha(tam=42):
    """Return a horizontal separator line."""
    return '-' * tam
def cabecalho(txt):
    """Print a centered header between two separator lines."""
    print(linha())
    print(txt.center(42))
    print(linha())
def menu(lista):
    """Print a numbered menu and return the option chosen by the user."""
    cabecalho('MAIN MENU')
    c = 1
    for item in lista:
        print(f'\033[33m{c}\033[m - \033[m{item}\033[m')
        c += 1
    print(linha())
    opc = leiaInt('\033[32mYour choice: \033[m ')
    return opc
|
from pckg1 import A
|
import numpy as np
def polynomial_interpolate(x, y):
M = [[x_i ** i for i in range(len(x))] for x_i in x]
a = np.linalg.solve(M, y)
def p(t):
return sum(a_i * t ** i for i, a_i in enumerate(a))
return p
def linear_curve_fit(x, y):
x = np.array(x)
y = np.array(y)
n = len(x)
x_bar = np.mean(x)
y_bar = np.mean(y)
s_xx = np.sum(np.square(x)) - n * x_bar * x_bar
s_xy = np.sum(x * y) - n * x_bar * y_bar
beta_1 = s_xy / s_xx
beta_0 = y_bar - beta_1 * x_bar
def f(t):
return beta_0 + beta_1 * t
return f
def exp_curve_fit(x, y):
x = np.array(x)
y = np.log(y)
n = len(x)
x_bar = np.mean(x)
y_bar = np.mean(y)
s_xx = np.sum(np.square(x)) - n * x_bar * x_bar
s_xy = np.sum(x * y) - n * x_bar * y_bar
beta = s_xy / s_xx
alpha = np.exp(y_bar - beta * x_bar)
def f(t):
return alpha * np.exp(beta * t)
return f
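# Illustrative usage sketch (sample data and printed values are assumptions):
# exercises the three helpers above on small synthetic data sets.
if __name__ == "__main__":
    xs = [0.0, 1.0, 2.0, 3.0]
    ys = [1.0, 3.0, 7.0, 13.0]                        # roughly t**2 + t + 1
    p = polynomial_interpolate(xs, ys)
    print("interpolating polynomial at 1.5:", p(1.5))
    f_lin = linear_curve_fit(xs, ys)
    print("linear least-squares fit at 1.5:", f_lin(1.5))
    f_exp = exp_curve_fit(xs, [1.0, 2.7, 7.4, 20.1])  # approximately e**t
    print("exponential fit at 1.5:", f_exp(1.5))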
|
#!/usr/bin/env python3
"""
Author : Christian Ayala <[email protected]>, Viviana Freire <[email protected]>
Date : 2021-04-19
Purpose: Generate jobs scripts to be submitted to the UA HPC clusters
"""
import argparse
import subprocess
import os
import glob
import pandas as pd
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Run modules for metatranscriptomics analysis',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o',
'--outdir',
type=str,
help='Output directory',
default='metaT_pipe_out')
parser.add_argument('-t',
'--threads',
type=int,
help='Number of threads')
subparser = parser.add_subparsers()
parser_cr = subparser.add_parser('create_reference',
help='Module to create a reference for mapping')
parser_cr.add_argument('input_directory',
type=str,
help='Directory with the bins or contigs in fasta format'
'to create the reference for mapping')
parser_cr.add_argument('--input_type',
type=str,
choices=['contigs', 'bins'],
help='Input data type: bins or contigs',
default='bins')
parser_cr.set_defaults(func=create_reference)
parser_an = subparser.add_parser('annotate_reference',
help='Module to annotate reference')
parser_an.add_argument('input_reference',
type=str,
help='Directory with bins or contigs fasta file to annotate')
parser_an.add_argument('reference_type',
type=str,
choices=['contigs', 'bins'],
help='Input data type: bins or contigs',
default='bins')
parser_an.add_argument('--no_checkm',
action='store_true',
help='Do not run CheckM',
default=True)
parser_an.add_argument('--no_gtdbtk',
action='store_true',
help='Do not run GTDB-TK',
default=True)
parser_an.set_defaults(func=annotate_reference)
parser_map = subparser.add_parser('map_reads',
help='Module to map reads to defined reference')
parser_map.add_argument('--mapping_reference',
type=str,
help='Reference in fasta format to map the reads',
default=None)
parser_map.add_argument('--r1',
type=str,
help='Forward reads for mapping in fastq format',
default=None)
parser_map.add_argument('--r2',
type=str,
help='Reverse reads for mapping in fastq format',
default=None)
parser_map.add_argument('--interleaved',
type=str,
help='Interleaved reads for mapping in fastq format',
default=None)
parser_map.add_argument('--mapper',
type=str,
choices=['bwa-mem', 'bowtie2'],
help='Choose to use either bwa-mem or bowtie2 for mapping the reads',
default='bwa-mem')
parser_map.set_defaults(func=map_reads)
parser_gc = subparser.add_parser('get_read_counts',
help='Module to obtain read counts from the mapping files')
parser_gc.add_argument('mapping_directory',
type=str,
help='Directory with the mapping files in bam format')
parser_gc.add_argument('--gff',
type=str,
required=True,
help='gff file with the gene coordinates to extract counts')
parser_gc.set_defaults(func=get_read_counts)
args = parser.parse_args()
# Check that command line arguments are specified properly
return args
# --------------------------------------------------
def run_commands(cmd):
    """Run a command given as an argument list and print its output"""
    cmd = [str(c) for c in cmd]  # subprocess needs string arguments (thread counts arrive as ints)
    p = subprocess.Popen(
        cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = p.communicate()
    if out:
        print(out.decode(errors='replace'))
    if err:
        print(err.decode(errors='replace'))
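# Illustrative alternative (an assumption, not used by the original code):
# several commands built below rely on shell features such as '>' redirection
# and '*' globbing, which subprocess ignores when shell=False. A shell-based
# variant would take a single command string instead of a list:
def run_shell_command(cmd_string):
    """Run a command through the shell so redirection and globbing work."""
    p = subprocess.Popen(cmd_string, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if out:
        print(out.decode(errors='replace'))
    if err:
        print(err.decode(errors='replace'))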
# --------------------------------------------------
def create_reference(args):
"""Create reference file for mapping"""
inputdir = os.path.join(args.input_directory, '*.fna')
outdir = os.path.join(args.outdir, 'create_reference')
if args.input_type == 'bins':
cmd = ['dRep', 'dereplicate', outdir, '-g', inputdir]
run_commands(cmd)
if args.input_type == 'contigs':
cmd = ['cat', inputdir, '>', 'concat_contigs.fasta']
run_commands(cmd)
cmd = ['cd-hit-est', '-i', 'concat_contigs.fasta', '-o', 'temp_contigs99.fasta',
'-c', '0.99', '-n', '10', '-T', args.threads]
run_commands(cmd)
cmd = ['seqkit', 'seq', '-m', '2000', 'temp_contigs99.fasta', '>',
os.path.join(outdir, 'derep_contigs99.fasta')]
run_commands(cmd)
cmd = ['rm', 'concat_contigs.fasta', 'temp_contigs99.fasta']
run_commands(cmd)
# --------------------------------------------------
def annotate_reference(args):
"""Annotate reference"""
outdir = os.path.join(args.outdir, 'annotate_reference')
if args.reference_type == 'bins':
if not args.no_checkm:
cmd = ['checkm', 'lineage_wf', '-t', args.threads, '-x', 'fna', args.input_reference,
os.path.join(outdir, 'checkm_results')]
run_commands(cmd)
cmd = ['checkm', 'qa', os.path.join(outdir, 'checkm_results', 'lineage.ms'),
os.path.join(outdir, 'checkm_results'), '--tab_table', '-f',
os.path.join(outdir, 'checkm_results', 'checkm_table.tsv')]
run_commands(cmd)
if not args.no_gtdbtk:
cmd = ['gtdbtk', 'classify_wf', '--genome_dir', args.input_reference, '--out_dir',
os.path.join(outdir, 'gtdb-tk_results'), '--cpus', args.threads]
run_commands(cmd)
if args.no_checkm is False and args.no_gtdbtk is False:
cmd = ['DRAM.py', 'annotate', '-i', "'" + os.path.join(args.input_reference, '*.fna') + "'", '-o',
os.path.join(outdir, 'dram_results'), '--checkm_quality',
os.path.join(outdir, 'checkm_results', 'checkm_table.tsv'), '--gtdb_taxonomy',
os.path.join(outdir, 'gtdb-tk_results', 'classify', 'gtdbtk.bac120.summary.tsv'),
'--threads', args.threads]
run_commands(cmd)
elif args.no_checkm is True and args.no_gtdbtk is False:
cmd = ['DRAM.py', 'annotate', '-i', "'" + os.path.join(args.input_reference, '*.fna') + "'", '-o',
os.path.join(outdir, 'dram_results'), '--gtdb_taxonomy',
os.path.join(outdir, 'gtdb-tk_results', 'classify', 'gtdbtk.bac120.summary.tsv'),
'--threads', args.threads]
run_commands(cmd)
elif args.no_checkm is False and args.no_gtdbtk is True:
cmd = ['DRAM.py', 'annotate', '-i', "'" + os.path.join(args.input_reference, '*.fna') + "'", '-o',
os.path.join(outdir, 'dram_results'), '--checkm_quality',
os.path.join(outdir, 'checkm_results', 'checkm_table.tsv'),
'--threads', args.threads]
run_commands(cmd)
if args.no_checkm is True and args.no_gtdbtk is True:
cmd = ['DRAM.py', 'annotate', '-i', "'" + os.path.join(args.input_reference, '*.fna') + "'", '-o',
os.path.join(outdir, 'dram_results'),
'--threads', args.threads]
run_commands(cmd)
if args.reference_type == 'contigs':
cmd = ['DRAM.py', 'annotate', '-i', args.input_reference, '-o',
os.path.join(outdir, 'dram_results'),
'--threads', args.threads]
run_commands(cmd)
# --------------------------------------------------
def map_reads(args):
"""Map reads to reference"""
outdir = os.path.join(args.outdir, 'map_reads')
if args.interleaved:
cmd = ['coverm', 'make', '-r', args.mapping_reference, '--interleaved', args.interleaved, '-p', args.mapper,
'-o', outdir, '-t', args.threads]
run_commands(cmd)
bam_file = os.path.join(outdir, os.path.basename(args.mapping_reference) + os.path.basename(args.interleaved) +
'.bam')
else:
cmd = ['coverm', 'make', '-r', args.mapping_reference, '-1', args.r1, '-2', args.r2, '-p', args.mapper,
'-o', outdir, '-t', args.threads]
run_commands(cmd)
        # Mirror the interleaved branch above; the exact BAM name coverm produces is an assumption.
        bam_file = os.path.join(outdir, os.path.basename(args.mapping_reference) + os.path.basename(args.r1) +
                                '.bam')
    # Keep derived BAM files in the same directory instead of prefixing the whole path.
    filtered_bam_file = os.path.join(os.path.dirname(bam_file), 'filtered.' + os.path.basename(bam_file))
    cmd = ['coverm', 'filter', '-b', bam_file, '-o', filtered_bam_file, '-t', args.threads]
    run_commands(cmd)
    sorted_bam_file = os.path.join(os.path.dirname(bam_file), 'sorted.' + os.path.basename(filtered_bam_file))
cmd = ['samtools', 'sort', filtered_bam_file, '-o', sorted_bam_file]
run_commands(cmd)
cmd = ['samtools', 'index', '-b', sorted_bam_file]
run_commands(cmd)
# --------------------------------------------------
def get_read_counts(args):
"""Get number of reads that mapped to each gene"""
outdir = os.path.join(args.outdir, 'get_read_counts')
    bam_files = glob.glob(os.path.join(args.mapping_directory, '*.bam'))
for file in bam_files:
cmd = ['dirseq', '--bam', file, '--gff', args.gff, '--measure_type', 'count', '>',
os.path.join(outdir, file + '.counts.tsv')]
run_commands(cmd)
    counts_files = glob.glob(os.path.join(outdir, '*.tsv'))
    counts_table = pd.read_csv(counts_files[0], sep='\t')[['ID']]  # the dirseq output is tab-separated
for file in counts_files:
temp = pd.read_csv(file, sep='\t')
temp['final_count'] = temp['forward_read_count'] - temp['reverse_read_count']
        temp.loc[temp['final_count'] < 0, 'final_count'] = 0  # clamp negative counts without zeroing the whole row
temp = temp[['ID', 'final_count']]
counts_table = counts_table.merge(temp, on='ID')
counts_table.to_csv(os.path.join(outdir, 'final_counts_table.csv'))
# --------------------------------------------------
def main():
"""Generate the job file"""
args = get_args()
args.func(args)
# --------------------------------------------------
if __name__ == '__main__':
main()
|
#!/awips2/python/bin/python
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# -----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------- -------- --------- ---------------------------------------------
# Jan 24, 2017 6092 randerso Initial Creation
#
##
import os
import sys
import time
import argparse
from dynamicserialize.dstypes.com.raytheon.uf.common.dataplugin.gfe.request.GfeClientRequest import GfeClientRequest
from dynamicserialize.dstypes.java.util import Date
from awips import ThriftClient
from awips import UsageArgumentParser
from awips.UsageArgumentParser import StoreTimeAction
from awips.UsageArgumentParser import TIME_FORMAT
def validateArgs(args=None):
parser = UsageArgumentParser.UsageArgumentParser(conflict_handler="resolve",
prog='gfeClient', add_help=False)
parser.add_argument("script", action="store",
help=argparse.SUPPRESS,
metavar="script")
parser.add_argument("-h", "--host", action="store", dest="host",
help="host name of edex request server",
default=str(os.getenv("DEFAULT_HOST", "localhost")),
metavar="hostname")
parser.add_argument("-p", "--port", action="store", type=int, dest="port",
help="port number of edex request server",
default=int(os.getenv("DEFAULT_PORT", "9581")),
metavar="port")
############################################################################
# -site is used for backward compatibility, --site is preferred
# long names with single dash are non-standard in Unix/Linux
############################################################################
parser.add_argument("--site", "-site", action="store", dest="site", required=True,
help="site ID",
metavar="site")
parser.add_argument("-c", "--config", action="store", dest="configFile", required=False,
default="gfeConfig",
help="GFE config file -- default gfeConfig",
metavar="configFile")
parser.add_argument("-u", action="store", dest="userName", required=False,
help="user name -- default SITE",
default="SITE",
metavar="userName")
parser.add_argument("-z", "--drt", action=StoreTimeAction, dest="drt", required=False,
help="displaced real time -- format YYYYMMDD_hhmm",
metavar="drt")
############################################################################
    # adding this argument so -s is not recognized as -site in other scripts
# -s is not used by this script
############################################################################
parser.add_argument("-s", action="store", dest="startTime", required=False,
help=argparse.SUPPRESS)
args, scriptArgs = parser.parse_known_args(args)
return parser, args, scriptArgs
def main(args):
# if no args other than script add --help so usage is displayed
if len(args) < 2:
args.extend(["--help"])
# if --help in args add dummy --site arg so we can display
# full script usage, not just the gfeClient.py usage
if "--help" in args:
args.extend(["--site", "XXX"])
parser, gfeClientArgs, scriptArgs = validateArgs(args)
# add config and user option to scriptArgs
scriptArgs.extend(["-c", gfeClientArgs.configFile, "-u", gfeClientArgs.userName])
# add drt option if specified
if gfeClientArgs.drt:
timeString = time.strftime(TIME_FORMAT, gfeClientArgs.drt)
scriptArgs.extend(["-z", timeString])
# add startTime option if specified
if gfeClientArgs.startTime:
scriptArgs.extend(["-s", gfeClientArgs.startTime])
# shutdown isn't a real script and has no gfeClientArgs to validate
if gfeClientArgs.script.lower() != "shutdown":
# call the validateArgs() method in the target script
scriptGlobals = {}
scriptLocals = {}
execfile(gfeClientArgs.script, scriptGlobals, scriptLocals)
scriptLocals["validateArgs"](args, [parser])
elif "--help" in args:
# Don't do shutdown if --help specified
# this is only for ifpIMAGE since it's calling shutdown until
# PngWriter can be fixed to run more than once in a session
sys.exit(0)
request = GfeClientRequest(gfeClientArgs.script, gfeClientArgs.site,
gfeClientArgs.configFile, gfeClientArgs.userName,
scriptArgs)
if gfeClientArgs.drt:
import calendar
timeInMillis = calendar.timegm(gfeClientArgs.drt) * 1000
request.setTime(Date(timeInMillis))
thriftClient = ThriftClient.ThriftClient(gfeClientArgs.host, gfeClientArgs.port, "/services")
thriftClient.sendRequest(request)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
#!/usr/bin/env python3
# encoding: utf-8
"""
insertion_sort.py
Created by Jakub Konka on 2011-11-01.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
import sys
import random as rnd
def insertion_sort(array):
'''This function implements the standard version of the
insertion sort algorithm.
Keyword arguments:
array -- input array of integers
    Returns: None (the array is sorted in place)
'''
n = len(array)
for i in range(1, n):
tmp = array[i]
j = i - 1
done = False
while not done:
if tmp < array[j]:
array[j+1] = array[j]
j -= 1
if j < 0:
done = True
else:
done = True
array[j+1] = tmp
if __name__ == '__main__':
n = int(sys.argv[1])
array = [rnd.randint(1,100) for i in range(n)]
# print("Array before sorting: ", array)
insertion_sort(array)
# print("Array after sorting: ", array)
|
"""Collect from Phoenix Contact SOLARCHECK String Monitor and send the data to your cloud using Ardexa
See:
https://github.com/ardexa/solarcheck-strings
"""
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='solarcheck_ardexa',
version='1.3.0',
description='Collect from Phoenix Contact SOLARCHECK String Monitor and send the data to your cloud using Ardexa',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ardexa/solarcheck-strings',
author='Ardexa Pty Limited',
author_email='[email protected]',
python_requires='>=2.7',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='Phoenix Contact SOLARCHECK solar string monitor ardexa',
py_modules=['solarcheck_ardexa'],
install_requires=[
'future',
'ardexaplugin',
'Click',
],
entry_points={
'console_scripts': [
'solarcheck_ardexa=solarcheck_ardexa:cli',
],
},
project_urls={ # Optional
'Bug Reports': 'https://github.com/ardexa/solarcheck-strings/issues',
'Source': 'https://github.com/ardexa/solarcheck-strings/',
},
)
|
# Generated by Django 4.0.2 on 2022-02-13 10:29
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('todoapp_backend', '0003_alter_task_options_alter_task_created_at'),
]
operations = [
migrations.AlterField(
model_name='task',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2022, 2, 13, 10, 29, 11, 456693, tzinfo=utc)),
),
]
|
#!/usr/bin/env python
"""General demonstrations /w set start and goal
"""
import os
import numpy as np
import yaml
# pylint: disable=invalid-name
# pylint: disable=import-error
# pylint: disable=wrong-import-position
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
traj_path = os.path.join(parentdir, 'scripts')
os.sys.path.insert(0, traj_path)
from trajectory_solver import TrajectorySolver
from agent import Agent
# pylint: enable=invalid-name
# pylint: enable=import-error
# pylint: enable=wrong-import-position
# Read arguments from yaml file
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
FILE_PATH = os.path.join(PARENT_DIR, 'swarm_manager/conf/swarm_conf.yaml')
with open(FILE_PATH) as f:
YAML_CONF = yaml.load(f, Loader=yaml.FullLoader)
SOLVER_ARGS = YAML_CONF['trajectory_solver']
AGENT_ARGS = {'r_min': SOLVER_ARGS['r_min'],
'col_radius_ratio': SOLVER_ARGS['col_radius_ratio'],
'goal_dist_thres': SOLVER_ARGS['goal_dist_thres'],
'goal_speed_thres': SOLVER_ARGS['goal_speed_thres'],
}
def demo_two_agents():
"""Two agents trading spots, horizontal
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.5, 2.0, 0.0], goal=[4.0, 2.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[4.0, 2.0, 0.0], goal=[0.5, 2.0, 0.0])
return [a_1, a_2]
def demo_two_agents_vert():
"""Two agents trading spots, vertically
"""
a_1 = Agent(AGENT_ARGS, start_pos=[2.5, 1.0, 0.0], goal=[2.5, 4.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[2.5, 4.0, 0.0], goal=[2.5, 1.0, 0.0])
return [a_1, a_2]
def demo_wall(wall_coords=None):
"""Demo with a wall
Args:
wall_coords (list of tuple): [(x, y, z), (x, y, z)]
Returns:
list: [[agent_list], [wall_coords]]
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 2.0, 0.0], goal=[4.0, 2.0, 0.0])
obs_coords = compute_obstacle([wall_coords], 15)
return [a_1], obs_coords
def through_wall():
"""Wall"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 2.0, 0.0], goal=[4.0, 2.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[5.0, 2.0, 0.0], goal=[0.0, 2.0, 0.0])
obs_coords = compute_obstacle([[(2.0, -1.0, 0.), (2.0, 1.5, 0.0)],
[(2.0, 2.5, 0.), (2.0, 5.0, 0.0)]], 30)
return [a_1, a_2], obs_coords
def corners_2():
"""Two agents trading spots, starting from opposite corners
"""
# a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 4.0, 0.0], goal=[4.0, 0.0, 0.0])
# a_2 = Agent(AGENT_ARGS, start_pos=[4.0, 0.0, 0.0], goal=[0.0, 4.0, 0.0])
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 0.0, 0.0], goal=[4.0, 4.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[4.0, 4.0, 0.0], goal=[0.0, 0.0, 0.0])
return [a_1, a_2]
def corners_2_2():
"""Corners 2, v2
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 4.0, 0.0], goal=[4.0, 0.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[4.0, 0.0, 0.0], goal=[0.0, 4.0, 0.0])
return [a_1, a_2]
def corners_4():
"""Four agents, starting from opposite corners
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 4.0, 0.0], goal=[4.0, 0.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[4.0, 0.0, 0.0], goal=[0.0, 4.0, 0.0])
a_3 = Agent(AGENT_ARGS, start_pos=[0.0, 0.0, 0.0], goal=[4.0, 4.0, 0.0])
a_4 = Agent(AGENT_ARGS, start_pos=[4.0, 4.0, 0.0], goal=[0.0, 0.0, 0.0])
return [a_1, a_2, a_3, a_4]
def six_agents():
"""Six agents
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 0.0, 0.0], goal=[1.5, 3.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[2.0, 0.0, 0.0], goal=[0.0, 4.0, 0.0])
a_3 = Agent(AGENT_ARGS, start_pos=[1.0, 2.5, 0.0], goal=[4.0, 0.0, 0.0])
a_4 = Agent(AGENT_ARGS, start_pos=[4.0, 4.0, 0.0], goal=[0.0, 0.0, 0.0])
a_5 = Agent(AGENT_ARGS, start_pos=[2.5, 2.5, 0.0], goal=[4.0, 2.5, 0.0])
a_6 = Agent(AGENT_ARGS, start_pos=[3.2, 3.2, 0.0], goal=[0.5, 0.0, 0.0])
return [a_1, a_2, a_3, a_4, a_5, a_6]
def corners_6():
"""Six agents, starting from opposite corners
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 4.0, 0.0], goal=[4.0, 0.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[4.0, 0.0, 0.0], goal=[0.0, 4.0, 0.0])
a_3 = Agent(AGENT_ARGS, start_pos=[0.0, 0.0, 0.0], goal=[4.0, 4.0, 0.0])
a_4 = Agent(AGENT_ARGS, start_pos=[4.0, 4.0, 0.0], goal=[0.0, 0.0, 0.0])
a_5 = Agent(AGENT_ARGS, start_pos=[2.0, 0.0, 0.0], goal=[2.0, 4.0, 0.0])
a_6 = Agent(AGENT_ARGS, start_pos=[2.0, 4.0, 0.0], goal=[2.0, 0.0, 0.0])
return [a_1, a_2, a_3, a_4, a_5, a_6]
def seven_agents():
"""Seven agents
"""
a_1 = Agent(AGENT_ARGS, start_pos=[0.0, 0.0, 0.0], goal=[1.5, 3.0, 0.0])
a_2 = Agent(AGENT_ARGS, start_pos=[2.0, 0.0, 0.0], goal=[0.0, 4.0, 0.0])
a_3 = Agent(AGENT_ARGS, start_pos=[1.0, 2.5, 0.0], goal=[4.0, 0.0, 0.0])
a_4 = Agent(AGENT_ARGS, start_pos=[4.0, 4.0, 0.0], goal=[0.0, 0.0, 0.0])
a_5 = Agent(AGENT_ARGS, start_pos=[2.5, 2.5, 0.0], goal=[4.0, 2.5, 0.0])
a_6 = Agent(AGENT_ARGS, start_pos=[3.2, 3.2, 0.0], goal=[0.5, 0.0, 0.0])
a_7 = Agent(AGENT_ARGS, start_pos=[1.7, 0.5, 0.0], goal=[0.8, 3.8, 0.0])
return [a_1, a_2, a_3, a_4, a_5, a_6, a_7]
def update_test():
"""To test when agents position are changed after solver initialization
"""
a_list = ['a1', 'a2', 'a3']
agents = {}
for each_a in a_list:
agents[each_a] = Agent(AGENT_ARGS, )
agent_list = [agent for (_, agent) in agents.items()]
solver = TrajectorySolver(agent_list)
agents['a1'].set_starting_position([1.0, 1.0, 0.0])
agents['a1'].set_goal([4.0, 4.0, 0.0])
agents['a2'].set_starting_position([4.0, 4.0, 0.0])
agents['a2'].set_goal([1.0, 1.0, 0.0])
agents['a3'].set_starting_position([2.0, 0.5, 0.0])
agents['a3'].set_goal([2.0, 4.0, 0.0])
solver.update_agents_info()
solver.solve_trajectories()
solver.plot_trajectories()
def compute_obstacle(positions, n_pts):
"""Compute coordinates of a wall
Args:
        positions (list of list of list): [[obstacle1_start, obstacle1_end], [obstacle2]]; [x, y, z]
n_pts (int): Number of pts
Returns:
list: All coords of wall
"""
all_coords = []
for each_obstacle in positions:
obstacle_start = each_obstacle[0]
obstacle_end = each_obstacle[1]
coords = []
obstacle_x = np.linspace(obstacle_start[0], obstacle_end[0], num=n_pts)
obstacle_y = np.linspace(obstacle_start[1], obstacle_end[1], num=n_pts)
for (x_coord, y_coord) in zip(obstacle_x, obstacle_y):
coords.append([x_coord, y_coord, 0])
all_coords.append(coords)
return all_coords
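# Illustrative entry point (not part of the original file): runs one of the
# demo scenarios through the solver imported above. The solver method names
# mirror those already used in update_test().
if __name__ == '__main__':
    AGENTS = demo_two_agents()
    SOLVER = TrajectorySolver(AGENTS)
    SOLVER.solve_trajectories()
    SOLVER.plot_trajectories()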
|
from athena import ndarray
from athena import gpu_links as gpu_op
from athena import gpu_ops as ad
import numpy as np
import argparse
import six.moves.cPickle as pickle
import gzip
import os
import pdb
import time
import logging
from athena import gpu_memory_manager
channel_axis = 1
variable_list = []
val_list = []
rand = np.random.RandomState(seed=123)
def load_cifar10_data(directory):
images, labels = [], []
for filename in ['%s/data_batch_%d' % (directory, j) for j in range(1, 6)]:
with open(filename, 'rb') as fo:
cifar10 = pickle.load(fo)
for i in range(len(cifar10[b"labels"])):
# image = np.reshape(cifar10[b"data"][i], (3, 32, 32))
image = cifar10[b"data"][i]
image = image.astype(float)
images.append(image)
labels += cifar10[b"labels"]
images = np.array(images, dtype='float')
labels = np.array(labels, dtype='int')
train_images, train_labels = images, labels
images, labels = [], []
for filename in ['%s/test_batch' % (directory)]:
with open(filename, 'rb') as fo:
cifar10 = pickle.load(fo)
for i in range(len(cifar10[b"labels"])):
# image = np.reshape(cifar10[b"data"][i], (3, 32, 32))
image = cifar10[b"data"][i]
image = image.astype(float)
images.append(image)
labels += cifar10[b"labels"]
images = np.array(images, dtype='float')
labels = np.array(labels, dtype='int')
test_images, test_labels = images, labels
    print(train_images.shape)
return train_images, train_labels, test_images, test_labels
def convert_to_one_hot(vals, max_val=0):
"""Helper method to convert label array to one-hot array."""
if max_val == 0:
max_val = vals.max() + 1
one_hot_vals = np.zeros((vals.size, max_val))
one_hot_vals[np.arange(vals.size), vals] = 1
return one_hot_vals
def sgd_update_gpu(param, grad_param, learning_rate, swap=False):
"""Helper GPU SGD update method. Avoids copying NDArray to cpu."""
if not swap:
assert isinstance(param, ndarray.NDArray)
assert isinstance(grad_param, ndarray.NDArray)
if swap:
param = param - learning_rate * grad_param
else:
gpu_op.matrix_elementwise_multiply_by_const(
grad_param, -learning_rate, grad_param)
gpu_op.matrix_elementwise_add(param, grad_param, param)
def get_variable(name, size):
global variable_list, val_list
x = ad.Variable(name=name)
x_val = rand.normal(scale=0.1, size=size)
x_val = ndarray.array(x_val, ctx=ndarray.gpu(0))
variable_list.append(x)
val_list.append(x_val)
return x
def conv2d_bn(x, nb_filter, ni_filter, num_row, num_col, stride = 1, padding = 'same'):
if padding != 'same':
x = ad.conv2d_op(x, get_variable('W', (nb_filter, ni_filter, num_row, num_col)), stride=stride) #oihw
else:
# x = ad.pad_op(x, [[0, 0], [0, 0], [0, num_row - stride], [0, num_col - stride]])
        x = ad.conv2d_op(x, get_variable('W', (nb_filter, ni_filter, num_row, num_col)),
                         (num_row - stride) // 2, (num_col - stride) // 2, stride=stride)  # oihw; integer 'same' padding
x = ad.batch_normalization_op(
x,get_variable('b_scale', (1, nb_filter, 1, 1)), get_variable('b_bias', (1, nb_filter, 1, 1)))
x = ad.relu_op(x)
return x
def block_inception_a(input, ni_filter):
branch_0 = conv2d_bn(input, 96, ni_filter, 1, 1)
branch_1 = conv2d_bn(input, 64, ni_filter, 1, 1)
branch_1 = conv2d_bn(branch_1, 96, 64, 3, 3)
branch_2 = conv2d_bn(input, 64, ni_filter, 1, 1)
branch_2 = conv2d_bn(branch_2, 96, 64, 3, 3)
branch_2 = conv2d_bn(branch_2, 96, 96, 3, 3)
branch_3 = ad.avg_pool2d_op(input, 3, 3, 1, 1)
branch_3 = conv2d_bn(branch_3, 96, ni_filter, 1, 1)
x = ad.concat_op(branch_0, branch_1, axis=1)
x = ad.concat_op(x, branch_2, axis=1)
x = ad.concat_op(x, branch_3, axis=1)
return x
def block_reduction_a(input, ni_filter):
branch_0 = conv2d_bn(input, 384, ni_filter, 3, 3, stride=2, padding='valid')
branch_1 = conv2d_bn(input, 192, ni_filter, 1, 1)
branch_1 = conv2d_bn(branch_1, 224, 192, 3, 3)
branch_1 = conv2d_bn(branch_1, 256, 224, 3, 3, stride=2, padding='valid')
branch_2 = ad.max_pool2d_op(input, 3, 3, 0, 2)
x = ad.concat_op(branch_0, branch_1, axis=1)
x = ad.concat_op(x, branch_2, axis=1)
return x
def block_inception_b(input, ni_filter):
branch_0 = conv2d_bn(input, 384, ni_filter, 1, 1)
branch_1 = conv2d_bn(input, 192, ni_filter, 1, 1)
branch_1 = conv2d_bn(branch_1, 224, 192, 1, 7)
branch_1 = conv2d_bn(branch_1, 256, 224, 7, 1)
branch_2 = conv2d_bn(input, 192, ni_filter, 1, 1)
branch_2 = conv2d_bn(branch_2, 192, 192, 7, 1)
branch_2 = conv2d_bn(branch_2, 224, 192, 1, 7)
branch_2 = conv2d_bn(branch_2, 224, 224, 7, 1)
branch_2 = conv2d_bn(branch_2, 256, 224, 1, 7)
branch_3 = ad.avg_pool2d_op(input, 3, 3, 1, 1)
branch_3 = conv2d_bn(branch_3, 128, ni_filter, 1, 1)
x = ad.concat_op(branch_0, branch_1, axis=1)
x = ad.concat_op(x, branch_2, axis=1)
x = ad.concat_op(x, branch_3, axis=1)
return x
def block_reduction_b(input, ni_filter):
branch_0 = conv2d_bn(input, 192, ni_filter, 1, 1)
branch_0 = conv2d_bn(branch_0, 192, 192, 3, 3, stride=2, padding='valid')
branch_1 = conv2d_bn(input, 256, ni_filter, 1, 1)
branch_1 = conv2d_bn(branch_1, 256, 256, 1, 7)
branch_1 = conv2d_bn(branch_1, 320, 256, 7, 1)
branch_1 = conv2d_bn(branch_1, 320, 320, 3, 3, stride=2, padding='valid')
branch_2 = ad.max_pool2d_op(input, 3, 3, 0, 2)
x = ad.concat_op(branch_0, branch_1, axis=1)
x = ad.concat_op(x, branch_2, axis=1)
return x
def block_inception_c(input, ni_filter):
branch_0 = conv2d_bn(input, 256, ni_filter, 1, 1)
branch_1 = conv2d_bn(input, 384, ni_filter, 1, 1)
branch_10 = conv2d_bn(branch_1, 256, 384, 1, 3)
branch_11 = conv2d_bn(branch_1, 256, 384, 3, 1)
branch_1 = ad.concat_op(branch_10, branch_11, axis=1)
branch_2 = conv2d_bn(input, 384, ni_filter, 1, 1)
branch_2 = conv2d_bn(branch_2, 448, 384, 3, 1)
branch_2 = conv2d_bn(branch_2, 512, 448, 1, 3)
branch_20 = conv2d_bn(branch_2, 256, 512, 1, 3)
branch_21 = conv2d_bn(branch_2, 256, 512, 3, 1)
branch_2 = ad.concat_op(branch_20, branch_21, axis=1)
branch_3 = ad.avg_pool2d_op(input, 3, 3, 1, 1)
branch_3 = conv2d_bn(branch_3, 256, ni_filter, 1, 1)
x = ad.concat_op(branch_0, branch_1, axis=1)
x = ad.concat_op(x, branch_2, axis=1)
x = ad.concat_op(x, branch_3, axis=1)
return x
def inception_v4_base(input, ni_filter):
net = conv2d_bn(input, 32, ni_filter, 3, 3, stride=2, padding='valid')
net = conv2d_bn(net, 32, 32, 3, 3, padding='valid')
net = conv2d_bn(net, 64, 32, 3, 3)
branch_0 = ad.max_pool2d_op(net, 3, 3, 0, 2)
branch_1 = conv2d_bn(net, 96, 64, 3, 3, stride=2, padding='valid')
net = ad.concat_op(branch_0, branch_1, axis=1)
branch_0 = conv2d_bn(net, 64, 160, 1, 1)
branch_0 = conv2d_bn(branch_0, 96, 64, 3, 3, padding='valid')
branch_1 = conv2d_bn(net, 64, 160, 1, 1)
branch_1 = conv2d_bn(branch_1, 64, 64, 1, 7)
branch_1 = conv2d_bn(branch_1, 64, 64, 7, 1)
branch_1 = conv2d_bn(branch_1, 96, 64, 3, 3, padding='valid')
net = ad.concat_op(branch_0, branch_1, axis=1)
branch_0 = conv2d_bn(net, 192, 192, 3, 3, stride=2, padding='valid')
branch_1 = ad.max_pool2d_op(net, 3, 3, 0, 2)
net = ad.concat_op(branch_0, branch_1, axis=1)
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
net = block_inception_a(net, 384)
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 384)
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
net = block_inception_b(net, 1024)
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 1024)
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
net = block_inception_c(net, 1536)
return net
def inception_v4(batch_size = 32, policy = "None"):
global variable_list, val_list
executor_ctx = ndarray.gpu(0)
variable_list = []
val_list = []
X = ad.Variable(name='X')
X_val = np.empty(shape=(batch_size, 3, 299, 299), dtype=np.float32)
# X_val = ndarray.array(X_val, ctx=ndarray.gpu(0))
y_ = ad.Variable(name='y_')
y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
# y_val = ndarray.array(y_val, ctx=ndarray.gpu(0))
model = inception_v4_base(X, 3)
model = ad.avg_pool2d_op(model, 8, 8, 0, 1)
model = ad.array_reshape_op(model, (batch_size, -1))
model = ad.matmul_op(model, get_variable('W', (1536, 10)))
y = model + ad.broadcastto_op(get_variable('b', (10)), (batch_size, 10))
loss = ad.softmaxcrossentropy_op(y, y_)
grad_list = ad.gradients(loss, variable_list)
if policy == "None" or policy == "base":
athena_exec = ad.Executor
elif policy == "vdnnconv" or policy == "vdnnall":
athena_exec = ad.vdnnExecutor
elif policy == "superneurons":
athena_exec = ad.superNeuronsExecutor
elif policy == "recompute_memory" or policy == "recompute_speed":
athena_exec = ad.recomputeExecutor
elif policy == "simulator":
athena_exec = microopOptimizer
elif policy == "profiler":
athena_exec = ad.profileExecutor
elif policy == "planner":
athena_exec = microopPlanner
elif policy == "tsplit":
athena_exec = ad.microopExecutor
else:
raise NotImplementedError
if policy == "vdnnconv":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "conv")
elif policy == "vdnnall":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "all")
elif policy == "recompute_memory":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "memory")
elif policy == "recompute_speed":
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx, policy = "speed")
else:
executor = athena_exec([loss] + grad_list + [y], ctx=executor_ctx)
feed_dict = dict()
feed_dict[X] = X_val
feed_dict[y_] = y_val
for i in range(len(variable_list)):
feed_dict[variable_list[i]] = val_list[i]
for i in range(2):
if i == 1:
start = time.time()
grad_val_list = executor.run(feed_dict)
end = time.time()
return (end - start) / 1
if __name__ == "__main__":
batch_size = 880
execution_time = inception_v4(batch_size, policy = "superneurons")
# # print(policy)
# for batch_size in range(32, 2000, 32):
# execution_time = inception_v4(batch_size, policy = policy)
# print("Batch size: {} , time: {} s\n".format(batch_size, execution_time))
# output_file.write("Batch size: {} , time: {} s\n".format(batch_size, execution_time))
    # output_file.close()
|
"""
Manages the displaying of the characters.
--
Author : DrLarck
Last update : 18/10/19 (DrLarck)
"""
# dependencies
import asyncio
# icons
from configuration.icon import game_icon
from configuration.color import game_color
# config
from configuration.bot import Bot_config
# utils
from utility.cog.character.getter import Character_getter
# translation
from utility.translation.translator import Translator
# embed
from utility.graphic.embed import Custom_embed
from utility.cog.displayer.color import Color_displayer
# displayer class
class Character_displayer:
"""
    Manages the displaying of character information, teams, etc.
- Parameter :
`character` : Represents a `list` of :class:`Character()` objects.
- Attribute :
- Method :
:coro:`display(combat_format`[Optional]`)` : Displays the characters in `character` list.
`combat_format`[bool] represents the format of the displaying.
"""
# attribute
def __init__(self, client, ctx, player):
# bot
self.client = client
self.player = player
self.ctx = ctx
# class
self.character = None
# method
async def display(self, summon_format = False, basic_format = False, combat_format = False, team_format = False, level = None, index = 0, team = 0):
"""
`coroutine`
Displays the characters in `character` list.
--
Return : discord.Message (embedded)
"""
# init
await self.character.init()
character_getter = Character_getter()
translation = Translator(self.client.db, self.player)
#_ = await translation.translate()
posture_icon = [":crossed_swords:", ":fire:", ":shield:", ":confused:"]
# embed
embed = Custom_embed(
self.client,
thumb = self.character.image.thumb
)
if(index == 0):
index = ""
embed = await embed.setup_embed()
## SUMMON FORMAT ##
if(summon_format):
summon_format = f"__Name__ : {self.character.image.icon}*{self.character.info.name}* {self.character.type.icon} {self.character.rarity.icon} `#{self.character.info.id}`\n"
summon_format += f"__Expansion__ : *{self.character.info.expansion}*{self.character.image.expansion}\n"
summon_format += f"__Saga__ : *{self.character.info.saga}*\n"
summon_format += f"__Damage__ :\n:punch: **{self.character.damage.physical_min:,}** - **{self.character.damage.physical_max:,}** \n{game_icon['ki_ability']} **{self.character.damage.ki_min:,}** - **{self.character.damage.ki_max:,}** \n"
summon_format += f"__Defense__ :\n:shield: **{self.character.defense.armor:,}**\n:rosette: **{self.character.defense.spirit:,}**\n"
summon_format += f"__Abilities__ :\n"
# get the abilities
ability_index = 1
if(len(self.character.ability) > 0):
for ability in self.character.ability:
await asyncio.sleep(0)
ability = ability(
None, None, None,
None, None, None
)
if(ability_index == 1):
summon_format += f"`{ability_index}. {ability.name}`{ability.icon}"
else:
summon_format += f" | `{ability_index}. {ability.name}`{ability.icon}"
ability_index += 1
else: # no ability
summon_format += "--"
# set the image
embed = Custom_embed(
self.client,
thumb = self.player.avatar,
colour = await Color_displayer().get_rarity_color(self.character.rarity.value)
)
# setup the embed
embed = await embed.setup_embed()
# config the embed
embed.set_image(url = self.character.image.image)
embed.add_field(
name = f"{self.player.name}'s summon",
value = summon_format,
inline = False
)
await self.ctx.send(embed = embed)
## BASIC FORMAT ##
elif(basic_format):
# get a character a second character for the comparison
comparison = await character_getter.get_character(self.character.info.id)
if(level != None):
if(level > 0):
comparison.level = level
# get the rarity
if(level <= Bot_config.rarity_level["n"]):
comparison.rarity.value = 0
# check if r
if(level <= Bot_config.rarity_level["r"] and level > Bot_config.rarity_level["n"]):
comparison.rarity.value = 1
# check if sr
elif(level <= Bot_config.rarity_level["sr"] and level > Bot_config.rarity_level["r"]):
comparison.rarity.value = 2
# check if ssr
elif(level <= Bot_config.rarity_level["ssr"] and level > Bot_config.rarity_level["sr"]):
comparison.rarity.value = 3
# check if ur
elif(level <= Bot_config.rarity_level["ur"] and level > Bot_config.rarity_level["ssr"]):
comparison.rarity.value = 4
# check if lr
elif(level <= Bot_config.rarity_level["lr"] and level > Bot_config.rarity_level["ur"]):
comparison.rarity.value = 5
else:
comparison.rarity.value = 5
level = 150
comparison.level = level
await comparison.init()
# set up the message
basic_format = f"__Name__ : {self.character.image.icon}*{self.character.info.name}* {self.character.type.icon} {self.character.rarity.icon} `#{self.character.info.id}`\n"
basic_format += f"__Expansion__ : *{self.character.info.expansion}*{self.character.image.expansion}\n"
basic_format += f"__Level__ : {self.character.level} vs *({level} {comparison.rarity.icon})*\n"
basic_format += f"__Saga__ : *{self.character.info.saga}*\n"
basic_format += f"__Health__ : **{self.character.health.maximum:,}**:hearts: *({comparison.health.maximum:,})*\n"
basic_format += f"__Damage__ :\n:punch: **{self.character.damage.physical_max:,}** *({comparison.damage.physical_max:,})* \n{game_icon['ki_ability']} **{self.character.damage.ki_max:,}** *({comparison.damage.ki_max:,})*\n"
basic_format += f"__Defense__ :\n:shield: **{self.character.defense.armor:,}** *({comparison.defense.armor:,})*\n:rosette: **{self.character.defense.spirit:,}** *({comparison.defense.spirit:,})*\n"
# setup the embed
embed = await Custom_embed(self.client, thumb = self.character.image.thumb).setup_embed()
embed.add_field(
name = f"{self.character.info.name}'s stats",
value = basic_format,
inline = False
)
embed.set_image(url = self.character.image.image)
# sending the embed
await self.ctx.send(embed = embed)
# now send the abilities info
if(len(self.character.ability) > 0):
for skill in self.character.ability:
await asyncio.sleep(0)
_skill = skill(self.client, self.ctx, None, None, None, None)
message = f"{_skill.icon}**__{_skill.name}__** (:fire: *{_skill.cost}*): *{_skill.description}*\n--"
await self.ctx.send(message)
## TEAM FORMAT ##
elif(team_format):
# posture
posture = None
if(self.character.posture.attacking == True):
posture = posture_icon[0]
if(self.character.posture.charging == True):
posture = posture_icon[1]
if(self.character.posture.defending == True):
posture = posture_icon[2]
if(self.character.posture.stunned == True):
posture = posture_icon[3]
team_format = f"__Level__ : **{self.character.level:,}**{self.character.rarity.icon}\n"
team_format += f"__Health__ : \n**{self.character.health.current:,}** / **{self.character.health.maximum:,}** :hearts:\n"
team_format += f"__Posture__ : {posture}\n"
# display bonus and malus
if(len(self.character.bonus) > 0):
team_format += f"__Bonus__ : "
for bonus in self.character.bonus:
await asyncio.sleep(0)
if(bonus.is_permanent):
team_format += f"{bonus.icon} ({bonus.stack}|*∞*) "
else:
team_format += f"{bonus.icon} ({bonus.stack}|{bonus.duration}) "
if(len(self.character.malus) > 0):
team_format += f"__Malus__ : "
for malus in self.character.malus:
await asyncio.sleep(0)
if(malus.is_permanent):
team_format += f"{malus.icon} ({malus.stack}|*∞*) "
else:
team_format += f"{malus.icon} ({malus.stack}|{malus.duration}) "
# color
if(team == 0):
embed = await Custom_embed(
self.client,
thumb = self.character.image.thumb,
colour = 0x009dff
).setup_embed()
if(team == 1):
embed = await Custom_embed(
self.client,
thumb = self.character.image.thumb,
colour = 0xff0000
).setup_embed()
embed.add_field(
name = f"#{index} - {self.character.image.icon}{self.character.info.name}{self.character.type.icon}",
value = team_format
)
await self.ctx.send(embed = embed)
## COMBAT FORMAT ##
elif(combat_format):
# posture
posture = None
if(self.character.posture.attacking == True):
posture = posture_icon[0]
if(self.character.posture.charging == True):
posture = posture_icon[1]
if(self.character.posture.defending == True):
posture = posture_icon[2]
if(self.character.posture.stunned == True):
posture = posture_icon[3]
# formatting the embed
combat_format = f"__Health__ : \n**{self.character.health.current:,}** / **{self.character.health.maximum:,}** :hearts: \n"
combat_format += f"__Posture__ : {posture}\n"
combat_format += f"__Damage__ :\n:punch: **{self.character.damage.physical_min:,}** - **{self.character.damage.physical_max:,}** \n{game_icon['ki_ability']} **{self.character.damage.ki_min:,}** - **{self.character.damage.ki_max:,}** \n"
combat_format += f"__Defense__ :\n:shield: **{self.character.defense.armor:,}**\n:rosette: **{self.character.defense.spirit:,}**\n"
combat_format += f"__Ki__ : **{self.character.ki.current}** :fire:"
# now the effects
# buff
if(len(self.character.bonus) > 0): # if the character has a buff
combat_format += f"\n__Bonus__ : "
for buff in self.character.bonus:
await asyncio.sleep(0)
if(buff.is_permanent):
combat_format += f"{buff.icon}[{buff.stack}|*∞*]"
else:
combat_format += f"{buff.icon}[{buff.stack}|{buff.duration}]"
if(len(self.character.malus) > 0):
combat_format += f"\n__Malus__ : "
for debuff in self.character.malus:
await asyncio.sleep(0)
combat_format += f"{debuff.icon}[{debuff.stack}|{debuff.duration}]"
# color
if(team == 0):
embed = await Custom_embed(
self.client,
thumb = self.character.image.thumb,
colour = 0x009dff
).setup_embed()
if(team == 1):
embed = await Custom_embed(
self.client,
thumb = self.character.image.thumb,
colour = 0xff0000
).setup_embed()
# send the messages
embed.add_field(name = f"{self.character.image.icon}{self.character.info.name} {self.character.type.icon}{self.character.rarity.icon}'s infos :", value = combat_format)
await self.ctx.send(embed = embed)
return |
import tensorflow as tf
from tensorflow.keras.preprocessing import image
import numpy as np
#NAME_FILE = "demo/cat.jpg"
model = tf.keras.models.load_model("modelo/model.h5")
def convertir_imagen_para_modelo(nombre_imagen):
img = image.load_img(nombre_imagen, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
return images
def predecir(nombre_imagen):
images = convertir_imagen_para_modelo(nombre_imagen)
classes = model.predict(images, batch_size=10)
if classes[0] > 0.5:
#print(NAME_FILE + " is a dog")
return "DOG"
else:
#print(NAME_FILE + " is a cat")
return "CAT"
if __name__ == "__main__":
    # NAME_FILE was previously only given in a comment above; define it so the script runs
    NAME_FILE = "demo/cat.jpg"
    print(predecir(NAME_FILE)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniInnerversionAuditstatusModifyModel(object):
def __init__(self):
self._app_version = None
self._audit_memo = None
self._audit_result = None
self._mini_app_id = None
@property
def app_version(self):
return self._app_version
@app_version.setter
def app_version(self, value):
self._app_version = value
@property
def audit_memo(self):
return self._audit_memo
@audit_memo.setter
def audit_memo(self, value):
self._audit_memo = value
@property
def audit_result(self):
return self._audit_result
@audit_result.setter
def audit_result(self, value):
self._audit_result = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
def to_alipay_dict(self):
params = dict()
if self.app_version:
if hasattr(self.app_version, 'to_alipay_dict'):
params['app_version'] = self.app_version.to_alipay_dict()
else:
params['app_version'] = self.app_version
if self.audit_memo:
if hasattr(self.audit_memo, 'to_alipay_dict'):
params['audit_memo'] = self.audit_memo.to_alipay_dict()
else:
params['audit_memo'] = self.audit_memo
if self.audit_result:
if hasattr(self.audit_result, 'to_alipay_dict'):
params['audit_result'] = self.audit_result.to_alipay_dict()
else:
params['audit_result'] = self.audit_result
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniInnerversionAuditstatusModifyModel()
if 'app_version' in d:
o.app_version = d['app_version']
if 'audit_memo' in d:
o.audit_memo = d['audit_memo']
if 'audit_result' in d:
o.audit_result = d['audit_result']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
return o
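# Minimal usage sketch (illustrative only; the field values below are made up and the
# surrounding Alipay SDK import must be available for this module to load at all):
if __name__ == "__main__":
    model = AlipayOpenMiniInnerversionAuditstatusModifyModel()
    model.app_version = "0.0.1"
    model.audit_result = "AUDIT_PASSED"
    model.mini_app_id = "2018xxxxxxxxxxxx"
    as_dict = model.to_alipay_dict()
    round_tripped = AlipayOpenMiniInnerversionAuditstatusModifyModel.from_alipay_dict(as_dict)
    print(as_dict, round_tripped.mini_app_id)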
|
from flask.json import jsonify
from db import Database
from flask import Flask, render_template, make_response
class Api:
def __init__(self) -> None:
self.app = Flask(__name__)
self.db = Database()
def set_route(self):
self.app.add_url_rule('/', 'index', self.index)
self.app.add_url_rule('/get<int:size>&<string:theme>', 'get', self.get_word, methods=['GET'])
self.app.add_url_rule('/get/', 'get', self.get_word, methods=['GET'])
self.app.add_url_rule('/get_theme', 'gettheme', self.get_theme, methods=['GET'])
def launch(self, host="127.0.0.1", port=8080):
self.set_route()
self.app.run(host=host, port=port)
def get_word(self, size=10, theme='all'):
cur = list(self.db.get(size, theme))
for element in cur:
element.pop('_id', None)
return jsonify(cur)
def get_theme(self):
data = list(self.db.get_themes())
resp = {}
for i in range(len(data)):
resp[i] = data[i]['_id']
return make_response(resp)
def index(self):
return render_template('index.html')
if __name__ == '__main__':
api = Api()
api.launch()
|
# fileName : Plugins/dm/txt2pdf.py
# copyright ©️ 2021 nabilanavab
import os
from fpdf import FPDF
from pdf import PROCESS
from pyrogram import filters
from Configs.dm import Config
from pyrogram import Client as ILovePDF
from pyrogram.types import InlineKeyboardButton
from pyrogram.types import InlineKeyboardMarkup
#--------------->
#--------> config vars
#------------------->
PDF_THUMBNAIL=Config.PDF_THUMBNAIL
BANNED_USERS=Config.BANNED_USERS
ADMIN_ONLY=Config.ADMIN_ONLY
ADMINS=Config.ADMINS
#--------------->
#--------> LOCAL VARIABLES
#------------------->
TXT = {}
UCantUse = "For Some Reason You Can't Use This Bot 🛑"
button=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"😉 Create your Own 😉",
url="https://github.com/nabilanavab/ilovepdf"
)
]
]
)
#--------------->
#--------> REPLY TO /txt2pdf
#------------------->
@ILovePDF.on_message(filters.private & filters.command(["txt2pdf"]) & ~filters.edited)
async def feedback(bot, message):
try:
await message.reply_chat_action("typing")
if (message.chat.id in BANNED_USERS) or (
(ADMIN_ONLY) and (message.chat.id not in ADMINS)
):
await message.reply_text(
UCantUse, reply_markup=button, quote=True
)
return
await message.reply_text(
text="__Now, Please Select A Font Style »__",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Times", callback_data="font|t"),
InlineKeyboardButton("Courier", callback_data="font|c")
],[
InlineKeyboardButton("Helvetica (Default)", callback_data="font|h")
],[
InlineKeyboardButton("Symbol", callback_data="font|s"),
InlineKeyboardButton("Zapfdingbats", callback_data="font|z")
],[
InlineKeyboardButton("🚫 €lose ", callback_data="closeme")
]
]
)
)
await message.delete()
except Exception as e:
print(e)
txt2pdf = filters.create(lambda _, __, query: query.data.startswith("font"))
@ILovePDF.on_callback_query(txt2pdf)
async def _txt2pdf(bot, callbackQuery):
try:
_, font = callbackQuery.data.split("|")
await callbackQuery.message.edit(
text=f"Text to Pdf» Now Select Page Size »",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Portarate", callback_data=f"pgSize|{font}|p")
],[
InlineKeyboardButton("Landscape", callback_data=f"pgSize|{font}|l")
],[
InlineKeyboardButton("« Back «", callback_data=f"txt2pdfBack")
]
]
)
)
except Exception as e:
print(e)
txt2pdfBack = filters.create(lambda _, __, query: query.data == "txt2pdfBack")
@ILovePDF.on_callback_query(txt2pdfBack)
async def _txt2pdfBack(bot, callbackQuery):
try:
await callbackQuery.message.edit(
text="__Now, Please Select A Font Style »__",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Times", callback_data="font|t"),
InlineKeyboardButton("Courier", callback_data="font|c")
],[
InlineKeyboardButton("Helvetica", callback_data="font|h")
],[
InlineKeyboardButton("Symbol", callback_data="font|s"),
InlineKeyboardButton("Zapfdingbats", callback_data="font|z")
],[
InlineKeyboardButton("🚫 €lose ", callback_data="closeme")
]
]
),
disable_web_page_preview=True
)
except Exception as e:
print(e)
pgSize = filters.create(lambda _, __, query: query.data.startswith("pgSize"))
@ILovePDF.on_callback_query(pgSize)
async def _pgSize(bot, callbackQuery):
try:
if callbackQuery.message.chat.id in PROCESS:
await callbackQuery.answer(
"Work in progress.. 🙇"
)
return
bla, _, __ = callbackQuery.data.split("|")
PROCESS.append(callbackQuery.message.chat.id)
TXT[callbackQuery.message.chat.id] = []
nabilanavab=True
while(nabilanavab):
# 1st value will be pdf title
askPDF = await bot.ask(
text="__TEXT TO PDF » Now, please enter a TITLE:__\n\n/exit __to cancel__\n/skip __to skip__",
chat_id=callbackQuery.message.chat.id,
reply_to_message_id=callbackQuery.message.message_id,
filters=None
)
if askPDF.text == "/exit":
await bot.send_message(
callbackQuery.message.chat.id,
"`Process Cancelled..` 😏"
)
PROCESS.remove(callbackQuery.message.chat.id)
del TXT[callbackQuery.message.chat.id]
break
elif askPDF.text == "/skip":
TXT[callbackQuery.message.chat.id].append(None)
nabilanavab=False
elif askPDF.text:
TXT[callbackQuery.message.chat.id].append(f"{askPDF.text}")
nabilanavab=False
# nabilanavab=True ONLY IF PROCESS CANCELLED
if nabilanavab == True:
PROCESS.remove(callbackQuery.message.chat.id)
            del TXT[callbackQuery.message.chat.id]
return
nabilanavab=True
while(nabilanavab):
# other value will be pdf para
askPDF = await bot.ask(
text=f"__TEXT TO PDF » Now, please enter paragraph {len(TXT[callbackQuery.message.chat.id])-1}:__"
"\n\n/exit __to cancel__\n/create __to create__",
chat_id=callbackQuery.message.chat.id,
reply_to_message_id=callbackQuery.message.message_id,
filters=None
)
if askPDF.text == "/exit":
await bot.send_message(
callbackQuery.message.chat.id,
"`Process Cancelled..` 😏"
)
PROCESS.remove(callbackQuery.message.chat.id)
del TXT[callbackQuery.message.chat.id]
break
elif askPDF.text == "/create":
if TXT[callbackQuery.message.chat.id][0]==None and len(TXT[callbackQuery.message.chat.id])==1:
await bot.send_message(
callbackQuery.message.chat.id,
"Nothing to create.. 😏"
)
else:
processMessage = await callbackQuery.message.reply_text(
"Started Converting txt to Pdf..🎉", quote=True
)
nabilanavab=False
elif askPDF.text:
TXT[callbackQuery.message.chat.id].append(f"{askPDF.text}")
# nabilanavab=True ONLY IF PROCESS CANCELLED
if nabilanavab == True:
PROCESS.remove(callbackQuery.message.chat.id)
            del TXT[callbackQuery.message.chat.id]
return
# Started Creating PDF
if _ == "t":
font="Times"
elif _ == "c":
font="Courier"
elif _ == "h":
font="Helvetica"
elif _ == "s":
font="Symbol"
elif _ == "z":
font="ZapfDingbats"
pdf = FPDF()
pdf.add_page(orientation=__)
pdf.set_font(font, "B", size=20)
if TXT[callbackQuery.message.chat.id][0] != None:
pdf.cell(200, 20, txt=TXT[callbackQuery.message.chat.id][0], ln=1, align="C")
pdf.set_font(font, size=15)
for _ in TXT[callbackQuery.message.chat.id][1:]:
pdf.multi_cell(200, 10, txt=_, border=0, align="L")
pdf.output(f"{callbackQuery.message.message_id}.pdf")
await callbackQuery.message.reply_chat_action("upload_document")
await processMessage.edit(
"`Started Uploading..` 🏋️"
)
await callbackQuery.message.reply_document(
file_name="txt2.pdf", quote=True,
document=open(f"{callbackQuery.message.message_id}.pdf", "rb"),
thumb=PDF_THUMBNAIL
)
await processMessage.delete()
PROCESS.remove(callbackQuery.message.chat.id)
        os.remove(f"{callbackQuery.message.message_id}.pdf")
        del TXT[callbackQuery.message.chat.id]
except Exception as e:
try:
PROCESS.remove(callbackQuery.message.chat.id)
await processMessage.edit(f"`ERROR`: __{e}__")
os.remove(f"{callbackQuery.message.message_id}.pdf")
            del TXT[callbackQuery.message.chat.id]
except Exception:
pass
# Telegram: @nabilanavab
|
from . import NamedEndpoint
from .urls import LeagueApiV3Urls
class LeagueApiV3(NamedEndpoint):
"""
This class wraps the League-v3 Api calls provided by the Riot API.
See https://developer.riotgames.com/api-methods/#league-v3/ for more detailed information
"""
def __init__(self, base_api):
"""
Initialize a new LeagueApiV3 which uses the provided base_api
:param BaseApi base_api: the root API object to use for making all requests.
"""
super(LeagueApiV3, self).__init__(base_api, self.__class__.__name__)
def challenger_by_queue(self, region, queue):
"""
Get the challenger league for a given queue.
:param string region: the region to execute this request on
:param string queue: the queue to get the challenger players for
:returns: LeagueListDTO
"""
url, query = LeagueApiV3Urls.challenger_by_queue(region=region, queue=queue)
return self._raw_request(self.challenger_by_queue.__name__, region, url, query)
def masters_by_queue(self, region, queue):
"""
Get the master league for a given queue.
:param string region: the region to execute this request on
        :param string queue: the queue to get the master players for
:returns: LeagueListDTO
"""
url, query = LeagueApiV3Urls.master_by_queue(region=region, queue=queue)
return self._raw_request(self.masters_by_queue.__name__, region, url, query)
def by_id(self, region, league_id):
"""
Get league with given ID, including inactive entries
:param string region: the region to execute this request on
:param string league_id: the league ID to query
:returns: LeagueListDTO
"""
url, query = LeagueApiV3Urls.by_id(region=region, league_id=league_id)
return self._raw_request(self.by_id.__name__, region, url, query)
def positions_by_summoner(self, region, summoner_id):
"""
Get league positions in all queues for a given summoner ID
:param string region: the region to execute this request on
:param long summoner_id: the summoner ID to query
:returns: Set[LeaguePositionDTO]
"""
url, query = LeagueApiV3Urls.positions_by_summoner(region=region, summoner_id=summoner_id)
return self._raw_request(self.positions_by_summoner.__name__, region, url, query)
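# Illustrative usage (a sketch only; assumes a configured BaseApi instance and a valid
# Riot API key are created elsewhere, and the region/queue values are examples):
#
#     league_api = LeagueApiV3(base_api)
#     challengers = league_api.challenger_by_queue("na1", "RANKED_SOLO_5x5")
#     positions = league_api.positions_by_summoner("na1", summoner_id)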
|
"""
Copyright (C) 2018 University of Massachusetts Amherst.
This file is part of "coref_tools"
http://github.com/nmonath/coref_tools
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from scipy.spatial.distance import cdist
import ot
from geo.models.ApproxALVec import ApproxALVec
class Wasserstein(ApproxALVec):
def __init__(self, mat, mask, max_num_samples):
super(Wasserstein,self).__init__(mat,mask,max_num_samples)
self.sinkhorn_reg = 2
self.max_sinkhorn_iter = 100
def quick_e_score(self, n1, n2):
"""Pass in an AvgLink and return negative average distance."""
if n1.needs_update:
n1._update()
if n2.needs_update:
n2._update()
# rows i want 1 is num_samples by dim
rows_i_want_1 = n1.mat
# rows i want 2 is num_samples by dim
rows_i_want_2 = n2.mat
# compute the point cloud wasserstein distance between the normalized
# distributions.
M = cdist(rows_i_want_1, rows_i_want_2)
a = np.ones(rows_i_want_1.shape[0]) / rows_i_want_1.shape[0]
b = np.ones(rows_i_want_2.shape[0]) / rows_i_want_2.shape[0]
dist = ot.sinkhorn2(a,b,M,self.sinkhorn_reg,method='sinkhorn_stabilized',numItermax=self.max_sinkhorn_iter)
return -dist[0]
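    # Note on quick_e_score: each node's rows are treated as a uniform empirical
    # distribution (the weight vectors a and b), and the method returns the negative
    # entropy-regularized transport cost from ot.sinkhorn2, so a larger (less negative)
    # score means the two nodes are more similar.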
def new(self, point, ment_id=None):
"""Create a new box around point."""
mat = np.zeros((1, np.size(point)))
mat[0] = point
return Wasserstein(mat, None, self.max_num_samples)
def hallucinate_merge(self, other):
"""Return the merger of me and other."""
res = Wasserstein(None, None, self.max_num_samples)
res.needs_update = True
return res |
import numpy as np
from numpy.testing import assert_array_almost_equal
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from rcbm import htuse
def test_calculate_heat_loss_kwh():
"""Output is approx equivalent to DEAP 4.2.0 example A"""
delta_t = pd.Series(
[12.42, 12.23, 10.85, 9.65, 7.15, 4.85, 3.0, 3.28, 5.03, 7.71, 10.38, 11.77],
index=[
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
],
)
hours = pd.Series(
[d * 24 for d in [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]],
index=[
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
],
)
heat_loss_coefficient = pd.Series(
[
121,
150,
]
)
expected_output = np.array(
[
1118.09808,
994.44576,
976.7604,
840.708,
643.6716,
422.532,
270.072,
295.27872,
438.2136,
694.08504,
904.3056,
1059.58248,
1386.072,
1232.784,
1210.86,
1042.2,
797.94,
523.8,
334.8,
366.048,
543.24,
860.436,
1121.04,
1313.532,
]
)
output = htuse._calculate_heat_loss_kwh(
heat_loss_coefficient=heat_loss_coefficient,
delta_t=delta_t,
hours=hours,
)
assert_array_almost_equal(output, expected_output)
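# The expected values above are consistent with
# heat_loss_coefficient [W/K] * delta_t [K] * hours / 1000, e.g. for January and the
# 121 W/K coefficient: 121 * 12.42 * 31 * 24 / 1000 = 1118.09808 kWh.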
def test_heat_loss_per_year():
internal_temperatures = pd.Series(
[
17.72,
17.73,
17.85,
17.95,
18.15,
18.35,
18.50,
18.48,
18.33,
18.11,
17.88,
17.77,
],
index=[
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
],
)
external_temperatures = pd.Series(
[5.3, 5.5, 7.0, 8.3, 11.0, 13.5, 15.5, 15.2, 13.3, 10.4, 7.5, 6.0],
index=[
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
],
)
heat_loss_coefficient = pd.Series(
[
121,
150,
]
)
expected_output = pd.Series([7232, 8965], dtype="float64")
output = htuse.calculate_heat_loss_per_year(
heat_loss_coefficient=heat_loss_coefficient,
internal_temperatures=internal_temperatures,
external_temperatures=external_temperatures,
how="monthly",
)
assert_series_equal(output, expected_output)
|
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import matplotlib.pyplot as plt
import time
import os
import copy
import sys
import psutil
import shutil
import numpy as np
import GPUtil
import cv2
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False
if(isnotebook()):
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm as tqdm
os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0";
import mxnet as mx
import torch
from mxnet import autograd as ag
from tabulate import tabulate
################################################################################
from system.common import read_json
from system.common import write_json
from system.common import parse_csv
from system.common import parse_csv_updated
from system.common import save
from system.summary import print_summary
################################################################################
################################################################################
from gluon.datasets.class_imbalance import balance_class_weights
from gluon.datasets.params import set_input_size
from gluon.datasets.params import set_batch_size
from gluon.datasets.params import set_data_shuffle
from gluon.datasets.params import set_num_processors
from gluon.datasets.params import set_weighted_sampling
from gluon.datasets.csv_dataset import DatasetCustom
from gluon.datasets.paths import set_dataset_train_path
from gluon.datasets.paths import set_dataset_test_path
################################################################################
################################################################################
from system.eda.eda import class_imbalance
from system.eda.eda import corrupted_missing_images
################################################################################
################################################################################
from system.graphs.bar import create_plot
from system.graphs.line import create_train_test_plots_accuracy
from system.graphs.line import create_train_test_plots_loss
################################################################################
################################################################################
from gluon.losses.losses import softmax_crossentropy
from gluon.losses.losses import crossentropy
from gluon.losses.losses import sigmoid_binary_crossentropy
from gluon.losses.losses import binary_crossentropy
from gluon.losses.losses import poisson_nll
from gluon.losses.losses import l1
from gluon.losses.losses import l2
from gluon.losses.losses import kldiv
from gluon.losses.losses import huber
from gluon.losses.losses import hinge
from gluon.losses.losses import squared_hinge
from gluon.losses.return_loss import load_loss
from gluon.losses.retrieve_loss import retrieve_loss
################################################################################
################################################################################
from gluon.models.layers import layer_dropout
from gluon.models.layers import layer_linear
from gluon.models.layers import activation_elu
from gluon.models.layers import activation_leakyrelu
from gluon.models.layers import activation_prelu
from gluon.models.layers import activation_relu
from gluon.models.layers import activation_selu
from gluon.models.layers import activation_sigmoid
from gluon.models.layers import activation_softplus
from gluon.models.layers import activation_softsign
from gluon.models.layers import activation_swish
from gluon.models.layers import activation_tanh
from gluon.models.params import set_model_name
from gluon.models.params import set_device
from gluon.models.params import set_pretrained
from gluon.models.params import set_freeze_base_network
from gluon.models.params import set_model_path
from gluon.models.common import set_parameter_requires_grad
from gluon.models.common import model_to_device
from gluon.models.common import print_grad_stats
from gluon.models.common import get_num_layers
from gluon.models.common import freeze_layers
from gluon.models.return_model import load_model
from gluon.models.return_model import setup_model
from gluon.models.return_model import debug_create_network
################################################################################
################################################################################
from gluon.optimizers.optimizers import sgd
from gluon.optimizers.optimizers import nesterov_sgd
from gluon.optimizers.optimizers import rmsprop
from gluon.optimizers.optimizers import momentum_rmsprop
from gluon.optimizers.optimizers import adam
from gluon.optimizers.optimizers import adagrad
from gluon.optimizers.optimizers import adadelta
from gluon.optimizers.optimizers import adamax
from gluon.optimizers.optimizers import nesterov_adam
from gluon.optimizers.optimizers import signum
from gluon.optimizers.retrieve_optimizer import retrieve_optimizer
from gluon.optimizers.return_optimizer import load_optimizer
################################################################################
################################################################################
from gluon.schedulers.schedulers import scheduler_fixed
from gluon.schedulers.schedulers import scheduler_step
from gluon.schedulers.schedulers import scheduler_multistep
from gluon.schedulers.retrieve_scheduler import retrieve_scheduler
from gluon.schedulers.return_scheduler import load_scheduler
################################################################################
################################################################################
from gluon.testing.process import process_single
################################################################################
################################################################################
from gluon.training.params import set_num_epochs
from gluon.training.params import set_display_progress_realtime
from gluon.training.params import set_display_progress
from gluon.training.params import set_save_intermediate_models
from gluon.training.params import set_save_training_logs
from gluon.training.params import set_intermediate_model_prefix
################################################################################
################################################################################
from gluon.transforms.transforms import transform_random_resized_crop
from gluon.transforms.transforms import transform_center_crop
from gluon.transforms.transforms import transform_color_jitter
from gluon.transforms.transforms import transform_random_horizontal_flip
from gluon.transforms.transforms import transform_random_vertical_flip
from gluon.transforms.transforms import transform_random_lighting
from gluon.transforms.transforms import transform_resize
from gluon.transforms.transforms import transform_normalize
from gluon.transforms.return_transform import set_transform_trainval
from gluon.transforms.return_transform import set_transform_test
from gluon.transforms.retrieve_transform import retrieve_trainval_transforms
from gluon.transforms.retrieve_transform import retrieve_test_transforms
################################################################################
|
# Generated by Django 3.2.5 on 2021-07-03 07:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("pages", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("content", models.TextField(max_length=500)),
("created_on", models.DateTimeField(auto_now_add=True)),
("active", models.BooleanField(default=False)),
(
"page",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comments",
to="pages.page",
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="replies",
to="cs.comment",
),
),
],
options={
"ordering": ("created_on",),
},
),
]
|
import astropy.io.fits as fits
FolderNameInputs = '../data/'
blind = 'A'
cat_version = 'V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_Fid'
name_tag = 'with_m_bias' # with_m_bias # no_m_bias # bmodes
filename = FolderNameInputs+'/kids/fits/xipm_sys_corrected_KIDS1000_Blind'+blind+'_'+name_tag+'_'+cat_version+'.fits'
F=fits.open(filename)
ext=F['xip']
xip_data_sys_corrected = ext.data['VALUE']
ext=F['xim']
xim_data_sys_corrected = ext.data['VALUE']
F.close()
filename = FolderNameInputs+'/kids/fits/xipm_KIDS1000_Blind'+blind+'_'+name_tag+'_'+cat_version+'.fits'
F=fits.open(filename)
ext=F['xip']
xip_data = ext.data['VALUE']
ext=F['xim']
xim_data = ext.data['VALUE']
F.close()
xip_sys = xip_data - xip_data_sys_corrected
xim_sys = xim_data - xim_data_sys_corrected
filename = FolderNameInputs+'/kids/mock_data/xipm_theory.fits'
F=fits.open(filename)
ext=F['xip']
xip_th = ext.data['VALUE']
ext=F['xim']
xim_th = ext.data['VALUE']
F.close()
xip_th_sys_corrected = xip_th - xip_sys
xim_th_sys_corrected = xim_th - xim_sys
filename = FolderNameInputs+'/kids/mock_data/xipm_theory.fits'
F = fits.open(filename)
ext=F['xip']
data = ext.data
data['VALUE'][:] = xip_th_sys_corrected.copy()
ext=F['xim']
data = ext.data
data['VALUE'][:] = xim_th_sys_corrected.copy()
filename = FolderNameInputs+'/kids/mock_data/xipm_theory_psf_sys_corrected_all_bins.fits'
F.writeto(filename)
F = fits.open(filename)
ext=F['xip']
xip = ext.data['VALUE']
|
from functools import wraps
from flask import session, redirect
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def error(errorCode, ErrorMessage):
'''
Return a Dictionary
with the keys as Code and Message and Status
'''
d={}
d['code'] = errorCode
d['message'] = str(ErrorMessage)
d['status'] = 'Failed'
return d
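# Illustrative usage (a sketch; assumes a Flask app object and session handling are
# configured elsewhere in the project):
#
#     @app.route("/dashboard")
#     @login_required
#     def dashboard():
#         return "only reachable when session['user_id'] is set"
#
#     # error(...) builds a JSON-friendly failure payload:
#     # error(404, "Not found") -> {'code': 404, 'message': 'Not found', 'status': 'Failed'}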
|
#! single-point UHF/cc-pVDZ on NH2
import os
import qcdb
from ..utils import *
def check_uhf_hf(return_value):
ref = -55.566057523877
nre = 7.580905897627
assert compare_values(ref, qcdb.variable("HF TOTAL ENERGY"), 5, "scf")
assert compare_values(nre, qcdb.variable("NUCLEAR REPULSION ENERGY"), 5, "nre")
@using("nwchem")
def test_1_hf():
nh2 = qcdb.set_molecule(
"""
N 0.08546 -0.00020 -0.05091
H -0.25454 -0.62639 0.67895
H -0.25454 -0.31918 -0.95813
"""
)
qcdb.set_options(
{"basis": "cc-pvdz", "nwchem_scf__uhf": True, "nwchem_scf__nopen": 1, "scf__e_convergence": 1.0e-8}
)
print("Testing hf...")
val = qcdb.energy("nwc-hf")
check_uhf_hf(val)
|
from domain.MedicineException import MedicineException
class MedicineController:
def __init__(self, repo):
self._repo = repo
def addMedicine(self, medicine):
"""
Add a new medicine
Input: medicine - the medicine that will be added
Raises MedicineException in case of duplicate medicine id.
"""
self._repo.add(medicine)
def removeMedicine(self, id):
"""
Remove the medicine with the given id
Input: id - the id of the medicine to remove
Raises MedicineException in case medicine having the given id does not exist
"""
self._repo.remove(id)
def getAll(self):
return self._repo.getAll()
def findMedicineByName(self, name):
"""
Find all medicines having the given name
Input: name - the name of the medicine being searched for
Output: List of medicines having the given name.
"""
result = []
for m in self._repo.getAll():
if name == m.getName():
result.append(m)
return result
def findMedicineById(self, id):
for m in self._repo.getAll():
if id == m.getId():
return m
raise MedicineException("No medicine with the given id found")
def getMedicinesByActiveSubstance(self, activeSubstance):
"""
Returns a sorted list of medicines having the given active substance. The list is sorted ascending by price.
Input: activeSubstance - the active substance searched for
Output: a list of medicines, sorted ascending by price, having the same active substance
"""
result = []
for m in self._repo.getAll():
if activeSubstance == m.getActiveSubstance():
result.append(m)
        # sort ascending by price (equivalent to the original bubble sort, but idiomatic)
        result.sort(key=lambda medicine: medicine.getPrice())
return result
def buyMedicine(self, medId, medAm):
m = self.findMedicineById(medId)
        if m.getQuantity() < medAm:  # allow buying the full remaining stock
raise MedicineException("Medicine not available in quantity!")
else:
m.setQuantity(m.getQuantity() - medAm)
print("Medicine bought, remaining ", m.getQuantity())
|
from django.test import SimpleTestCase
from mock import patch
from corehq.apps.app_manager.const import AUTO_SELECT_USERCASE
from corehq.apps.app_manager.models import (
AdvancedModule,
AdvancedOpenCaseAction,
Application,
AutoSelectCase,
CaseIndex,
LoadUpdateAction,
Module,
ReportAppConfig,
ReportModule,
)
from corehq.apps.app_manager.util import purge_report_from_mobile_ucr
from corehq.apps.userreports.models import ReportConfiguration
from corehq.util.test_utils import flag_enabled
class ModuleTests(SimpleTestCase):
def setUp(self):
self.app = Application.new_app('domain', "Untitled Application")
self.module = self.app.add_module(Module.new_module('Untitled Module', None))
self.module.case_type = 'another_case_type'
self.form = self.module.new_form("Untitled Form", None)
class AdvancedModuleTests(SimpleTestCase):
def setUp(self):
self.app = Application.new_app('domain', "Untitled Application")
self.module = self.app.add_module(AdvancedModule.new_module('Untitled Module', None))
self.form = self.module.new_form("Untitled Form", None)
def test_registration_form_simple(self):
self.form.actions.open_cases = [
AdvancedOpenCaseAction(
case_tag="phone",
case_type="phone",
name_path="/data/question1",
)
]
self.assertTrue(self.form.is_registration_form())
def test_registration_form_subcase(self):
self.form.actions.load_update_cases.append(LoadUpdateAction(
case_type="parent",
case_tag="parent"
))
self.form.actions.open_cases = [
AdvancedOpenCaseAction(
case_tag="child",
case_type="child",
name_path="/data/question1",
case_indices=[CaseIndex(tag="parent")]
)
]
self.assertTrue(self.form.is_registration_form())
def test_registration_form_autoload(self):
self.form.actions.load_update_cases = [
LoadUpdateAction(
auto_select=AutoSelectCase(mode=AUTO_SELECT_USERCASE, value_key=""),
)
]
self.form.actions.open_cases = [
AdvancedOpenCaseAction(
case_tag="child",
case_type="child",
name_path="/data/question1",
)
]
self.assertTrue(self.form.is_registration_form())
def test_registration_form_autoload_subcase(self):
self.form.actions.load_update_cases = [
LoadUpdateAction(
case_type="parent",
case_tag="parent"
),
LoadUpdateAction(
auto_select=AutoSelectCase(mode=AUTO_SELECT_USERCASE, value_key=""),
)
]
self.form.actions.open_cases = [
AdvancedOpenCaseAction(
case_tag="child",
case_type="child",
name_path="/data/question1",
case_indices=[CaseIndex(tag="parent")]
)
]
self.assertTrue(self.form.is_registration_form())
def test_registration_form_subcase_multiple(self):
self.form.actions.load_update_cases.append(LoadUpdateAction(
case_type="parent",
case_tag="parent"
))
self.form.actions.open_cases = [
AdvancedOpenCaseAction(
case_tag="child",
case_type="child",
name_path="/data/question1",
case_indices=[CaseIndex(tag="parent")]
),
AdvancedOpenCaseAction(
case_tag="grandchild",
case_type="grandchild",
name_path="/data/children/question1",
case_indices=[CaseIndex(tag="child")]
)
]
self.assertFalse(self.form.is_registration_form())
def test_registration_form_subcase_multiple_repeat(self):
self.test_registration_form_subcase_multiple()
self.form.actions.open_cases[-1].repeat_context = "/data/children"
self.assertTrue(self.form.is_registration_form())
class ReportModuleTests(SimpleTestCase):
@flag_enabled('MOBILE_UCR')
@patch('dimagi.ext.couchdbkit.Document.get_db')
def test_purge_report_from_mobile_ucr(self, get_db):
report_config = ReportConfiguration(domain='domain', config_id='foo1')
report_config._id = "my_report_config"
app = Application.new_app('domain', "App")
report_module = app.add_module(ReportModule.new_module('Reports', None))
report_module.report_configs = [
ReportAppConfig(report_id=report_config._id, header={'en': 'CommBugz'}),
ReportAppConfig(report_id='other_config_id', header={'en': 'CommBugz'})
]
self.assertEqual(len(app.modules[0].report_configs), 2)
with patch('corehq.apps.app_manager.util.get_apps_in_domain') as get_apps:
get_apps.return_value = [app]
# this will get called when report_config is deleted
purge_report_from_mobile_ucr(report_config)
self.assertEqual(len(app.modules[0].report_configs), 1)
class OverwriteModuleDetailTests(SimpleTestCase):
def setUp(self):
self.attrs_dict1 = {
'columns': True,
'filter': True,
'sort_elements': True,
'sort_nodeset_columns': True,
'custom_variables': True,
'custom_xml': True,
'case_tile_configuration': True,
'print_template': True
}
self.attrs_dict2 = {
'columns': True,
'filter': True,
'sort_elements': False,
'sort_nodeset_columns': False,
'custom_variables': False,
'custom_xml': False,
'case_tile_configuration': False,
'print_template': False
}
self.attrs_dict3 = {
'columns': False,
'filter': False,
'sort_elements': False,
'sort_nodeset_columns': False,
'custom_variables': False,
'custom_xml': False,
'case_tile_configuration': True,
'print_template': False
}
self.app = Application.new_app('domain', "Untitled Application")
self.src_module = self.app.add_module(Module.new_module('Src Module', lang='en'))
self.src_module_detail_type = getattr(self.src_module.case_details, "short")
self.header_ = getattr(self.src_module_detail_type.columns[0], 'header')
self.header_['en'] = 'status'
self.filter_ = setattr(self.src_module_detail_type, 'filter', 'a > b')
self.sort_nodeset_columns = setattr(self.src_module_detail_type, 'sort_nodeset_columns', True)
self.custom_variables = setattr(self.src_module_detail_type, 'custom_variables', 'def')
self.custom_xml = setattr(self.src_module_detail_type, 'custom_xml', 'ghi')
self.print_template = getattr(self.src_module_detail_type, 'print_template')
self.print_template['name'] = 'test'
self.case_tile_configuration = setattr(self.src_module_detail_type, 'persist_tile_on_forms', True)
def test_overwrite_all(self):
dest_module = self.app.add_module(Module.new_module('Dest Module', lang='en'))
dest_module_detail_type = getattr(dest_module.case_details, "short")
dest_module_detail_type.overwrite_from_module_detail(self.src_module_detail_type, self.attrs_dict1)
self.assertEqual(self.src_module_detail_type.to_json(), dest_module_detail_type.to_json())
def test_overwrite_filter_column(self):
dest_module = self.app.add_module(Module.new_module('Dest Module', lang='en'))
dest_module_detail_type = getattr(dest_module.case_details, "short")
dest_module_detail_type.overwrite_from_module_detail(self.src_module_detail_type, self.attrs_dict2)
self.assertEqual(self.src_module_detail_type.columns, dest_module_detail_type.columns)
self.assertEqual(self.src_module_detail_type.filter, dest_module_detail_type.filter)
self.remove_attrs(dest_module_detail_type)
self.assertNotEqual(self.src_module_detail_type.to_json(), dest_module_detail_type.to_json())
def test_overwrite_other_configs(self):
dest_module = self.app.add_module(Module.new_module('Dest Module', lang='en'))
dest_module_detail_type = getattr(dest_module.case_details, "short")
dest_module_detail_type.overwrite_from_module_detail(self.src_module_detail_type, self.attrs_dict3)
self.assertNotEqual(str(self.src_module_detail_type.columns), str(dest_module_detail_type.columns))
self.assertNotEqual(self.src_module_detail_type.filter, dest_module_detail_type.filter)
self.assertEqual(self.src_module_detail_type.persist_tile_on_forms,
dest_module_detail_type.persist_tile_on_forms)
def remove_attrs(self, dest_module_detail_type):
delattr(self.src_module_detail_type, 'filter')
delattr(self.src_module_detail_type, 'columns')
delattr(dest_module_detail_type, 'filter')
delattr(dest_module_detail_type, 'columns')
|
from flask import Blueprint, current_app, jsonify, request
from euroscipy_dataviz.prediction_plot import generate_plot
api = Blueprint("api", __name__, url_prefix="/api")
@api.after_request
def add_cors_header(response):
response.headers["Access-Control-Allow-Origin"] = "*"
return response
@api.route("/stores")
def available_stores():
"""
Return a list of stores that can be used by the frontend to query data per store.
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
[
{
"id": "1000",
"name": "Karlsruhe"
},
{
"id": "1001",
"name": "Trento"
}
]
"""
stores = current_app.plot_data.drop_duplicates(["store_id"])[
["store_id", "store_name"]
]
return jsonify(stores.to_dict(orient="records"))
@api.route("/products")
def available_products():
"""
Returns a list of products that can be used by the frontend to query data.
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
[
{
"product_id": "1000",
"product_name": "Banana"
},
{
"product_id": "1001",
"product_name": "Apple"
}
]
"""
products = current_app.plot_data.drop_duplicates(["product_id"])[
["product_id", "product_name"]
]
return jsonify(products.to_dict(orient="records"))
@api.route("/plot")
def get_plot():
params = request.args
df = current_app.plot_data
store = params["store"]
product = params["product"]
date_from = params["from"]
date_to = params["to"]
qry = (
f"store_id=='{store}' and product_id=='{product}' and "
f"date >= '{date_from}' and date <= '{date_to}'"
)
df = df.query(qry)
chart = generate_plot(df)
return jsonify(chart.to_dict())
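# Example of the pandas query string built in get_plot() for a request such as
# /api/plot?store=1000&product=1001&from=2019-01-01&to=2019-03-31 (values are illustrative):
#     "store_id=='1000' and product_id=='1001' and date >= '2019-01-01' and date <= '2019-03-31'"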
|
import os
import shutil
import keras
import numpy as np
# we are not augmenting the biggest class of 'nv'
class_list = ['mel','bkl','bcc','akiec','vasc','df']
for item in class_list:
aug_dir = 'aug_dir'
os.mkdir(aug_dir)
img_dir = os.path.join(aug_dir, 'img_dir')
os.mkdir(img_dir)
img_class = item
img_list = os.listdir('train/' + img_class)
for fname in img_list:
shutil.copyfile(
os.path.join('train/' + img_class, fname),
os.path.join(img_dir, fname))
datagen = keras.preprocessing.image.ImageDataGenerator(
rotation_range=180,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
batch_size = 50
aug_datagen = datagen.flow_from_directory(
'aug_dir',
save_to_dir='train/' + img_class,
save_format='jpg',
target_size=(224,224),
batch_size=batch_size)
num_aug_images_wanted = 6000
num_files = len(os.listdir(img_dir))
num_batches = int(np.ceil((num_aug_images_wanted-num_files)/batch_size))
for i in range(0,num_batches):
imgs, labels = next(aug_datagen)
shutil.rmtree('aug_dir')
print(str(len(os.listdir('train/nv'))) + ' in nv dir')
print(str(len(os.listdir('train/mel'))) + ' in mel dir')
print(str(len(os.listdir('train/bkl'))) + ' in bkl dir')
print(str(len(os.listdir('train/bcc'))) + ' in bcc dir')
print(str(len(os.listdir('train/akiec'))) + ' in akiec dir')
print(str(len(os.listdir('train/vasc'))) + ' in vasc dir')
print(str(len(os.listdir('train/df'))) + ' in df dir')
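# Note on the augmentation loop above: num_batches tops each class up to roughly
# num_aug_images_wanted images, e.g. a class with 500 source images needs
# ceil((6000 - 500) / 50) = 110 batches of 50 generated images.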
|
# [Silent Crusade] Unpleasant Meeting
CROW = 9073002
sm.setSpeakerID(CROW)
sm.sendNext("You're the new guy?")
sm.completeQuest(parentID) |
from core.processor import Processor
from utils.OSUtils import OSUtils
class FILE_DELETEProcessor(Processor):
TPL: str = '{"file_path":"path/to/folder/file"}'
DESC: str = f'''
To delete the file associated with given absolute file_path
{TPL}
'''
def process(self):
OSUtils.delete_file_if_existed(
self.expression2str(
self.get_param('file_path')
)
)
|
from api_commons.common import BaseController
class ExampleController(BaseController):
pass |
from functools import partial
from typing import List, Optional
from wai.annotations.core.component import ProcessorComponent
from wai.annotations.core.stream import ThenFunction, DoneFunction
from wai.annotations.core.stream.util import ProcessState, RequiresNoFinalisation
from wai.annotations.domain.image.object_detection import ImageObjectDetectionInstance
from wai.annotations.domain.image.object_detection.util import set_object_label
from wai.common.adams.imaging.locateobjects import LocatedObjects, LocatedObject
from wai.common.cli.options import TypedOption
from .._format import YOLOODFormat, YOLOObject
class FromYOLOOD(
RequiresNoFinalisation,
ProcessorComponent[YOLOODFormat, ImageObjectDetectionInstance]
):
"""
Converter from YOLO annotations to internal format.
"""
# Path to the labels file
labels_file: Optional[str] = TypedOption(
"-l", "--labels",
type=str,
metavar="PATH",
help="Path to the labels file"
)
# Mapping from class index to label
labels: List[str] = ProcessState(lambda self: self.read_labels_file())
def process_element(
self,
element: YOLOODFormat,
then: ThenFunction[ImageObjectDetectionInstance],
done: DoneFunction
):
# Unpack the external format
image_info, yolo_objects = element
# Convert YOLO objects to located objects
located_objects = None
if len(yolo_objects) > 0:
to_located_object = partial(self.to_located_object, image_width=image_info.width, image_height=image_info.height)
located_objects = LocatedObjects(map(to_located_object, yolo_objects))
then(
ImageObjectDetectionInstance(
image_info,
located_objects
)
)
def read_labels_file(self) -> List[str]:
"""
Parses the labels file if one is given.
:return:
The label mapping.
"""
# If no label file is given, return an empty mapping
if self.labels_file is None:
return []
# Read and parse the labels file
with open(self.labels_file, "r") as labels_file:
return labels_file.read().split(",")
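    # The labels file is read as comma-separated class names mapping index -> label,
    # e.g. a file containing "person,car,dog" (an illustrative example, not from the source).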
def to_located_object(self, object: YOLOObject, *, image_width: int, image_height: int) -> LocatedObject:
"""
Converts the YOLO object to a located object.
:param object:
The YOLO object.
:return:
The located object.
"""
# Get the object label (just uses the class index if no mapping is provided)
label: str = self.labels[object.class_index] if len(self.labels) > 0 else str(object.class_index)
# Get the boundary co-ordinates
width = round(object.width * image_width)
height = round(object.height * image_height)
x_min = round(object.centre_x * image_width - width / 2)
y_min = round(object.centre_y * image_height - height / 2)
# Create the located object
located_object = LocatedObject(x_min, y_min, width, height)
set_object_label(located_object, label)
return located_object
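# Worked example for to_located_object (illustrative numbers): for a 640x480 image and a
# YOLO box with centre_x=0.5, centre_y=0.5, width=0.25, height=0.5, the pixel box becomes
# width=160, height=240, x_min=round(320 - 80)=240, y_min=round(240 - 120)=120.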
|
import os
import pytest
from HUGS.LocalClient import RankSources
from HUGS.Modules import ObsSurface
from HUGS.ObjectStore import get_local_bucket
# Ensure we have something to rank
@pytest.fixture(scope="session", autouse=True)
def crds():
get_local_bucket(empty=True)
dir_path = os.path.dirname(__file__)
test_data = "../data/proc_test_data/CRDS"
filename = "hfd.picarro.1minute.100m.min.dat"
filepath = os.path.join(dir_path, test_data, filename)
ObsSurface.read_file(filepath=filepath, data_type="CRDS")
def test_ranking(crds):
r = RankSources()
results = r.get_sources(site="hfd", species="co2", data_type="CRDS")
hundred_uuid = results["co2_hfd_100m_picarro"]["uuid"]
del results["co2_hfd_100m_picarro"]["uuid"]
expected_results = {'co2_hfd_100m_picarro': {'rank': 0, 'data_range': '2013-12-04T14:02:30_2019-05-21T15:46:30'}}
assert results == expected_results
rank_daterange = r.create_daterange(start="2013-12-04", end="2016-05-05")
updated = {'co2_hfd_100m_picarro': {'rank': {1: [rank_daterange]}, 'data_range': '2013-12-04T14:02:30_2019-05-21T15:46:30',
"uuid": hundred_uuid}}
r.rank_sources(updated_rankings=updated, data_type="CRDS")
results = r.get_sources(site="hfd", species="co2", data_type="CRDS")
del results["co2_hfd_100m_picarro"]["uuid"]
assert results == {'co2_hfd_100m_picarro': {'rank': {'1': ['2013-12-04T00:00:00_2016-05-05T00:00:00']},
'data_range': '2013-12-04T14:02:30_2019-05-21T15:46:30'}} |
# assumes df is an existing pandas DataFrame with a DatetimeIndex and "north"/"south" columns
newyear = df.loc["2013-12-31 12:00:00": "2014-01-01 12:00:00", ["north", "south"]] |
from config_local import ES_PRIVATE_HOST, ES_HTTP_AUTH
from web.handlers import EXTRA_HANDLERS
from biothings.web.settings.default import QUERY_KWARGS
# *****************************************************************************
# Elasticsearch Settings
# *****************************************************************************
ES_HOST = ES_PRIVATE_HOST
ES_ARGS = {
"http_auth": ES_HTTP_AUTH
}
ES_INDICES = {
None: "c*,o*", # all indices excluding internal ones
"outbreak": "outbreak_*",
"cd2h": "cd2h-*",
"csbc": "csbc-*"
}
ES_DOC_TYPE = 'doc'
ANNOTATION_DEFAULT_SCOPES = ["_id", "symbol"]
QUERY_KWARGS['*']['_source']['default'] = ["description", "entity"] # restrict results
# *****************************************************************************
# Web Application
# *****************************************************************************
API_PREFIX = 'api'
API_VERSION = 'v1'
# *****************************************************************************
# Elasticsearch Query Pipeline
# *****************************************************************************
#ES_QUERY_PIPELINE = "web.handlers.CD2HQueryPipeline"
#ES_QUERY_BUILDER = "web.handlers.CD2HQueryBuilder"
#AVAILABLE_FIELDS_EXCLUDED = ['all', 'accession_agg', 'refseq_agg'] |
import numpy as np
import os
import tensorflow as tf
from azureml.core.run import Run
from scipy.stats import entropy
from ..utils.tfrecords import resize, parse_tfrecord
from .kmeans import *
from ..models import *
run = Run.get_context()
class ClusterFeatureMap(tf.keras.Model):
""""
This is a clustering class with methods to allow batch clustering
of the latent representation generated by classifier
"""
def __init__(self, clustering, classifier, batch_size=16):
super().__init__()
self.clustering = clustering
self.classifier = classifier
self.batch_size = batch_size
def train_step(self, data):
noisy1, label = data[0], data[1]
_, latent = self.classifier.estimate(noisy1)
latent = tf.reduce_mean(latent, axis=(1))
def get_assign():
return self.clustering.assign(latent)
def get_initialize():
return self.clustering.initialize(latent)
centroid_assignment = tf.cond(self.clustering.initialized, get_assign, lambda: tf.zeros_like(latent[:, 0], dtype=tf.int64))
def get_update():
return self.clustering.update(latent, centroid_assignment, label)
l2_adjustment = self.clustering.compute_distance(latent, centroid_assignment)
labels_distance = self.clustering.compute_distance_labels(label, centroid_assignment)
tf.cond(self.clustering.initialized, get_update, get_initialize)
results = {'cluster_dispersion': tf.reduce_sum(l2_adjustment) / self.batch_size,
'cluster_label_distance': tf.reduce_sum(labels_distance) / self.batch_size}
return results
def call(self, data):
noisy1, label = data[0], data[1]
_, latent = self.classifier(noisy1)
latent = tf.reduce_mean(latent, axis=(1))
        centroid_assignment = self.clustering.assign(latent)
return centroid_assignment
class SaveCluster(tf.keras.callbacks.Callback):
"""
A callback class for saving clusters
"""
def __init__(self, save_dir):
super().__init__()
self.save_dir = save_dir
def on_epoch_end(self, epoch, logs={}):
centroids = self.model.clustering.centroids.numpy()
labels = self.model.clustering.cluster_labels.numpy()
if hasattr(self.model.clustering, 'centroids_covariance'):
centroids_covariance = self.model.clustering.centroids_covariance.numpy()
np.savez(f'{self.save_dir}/centroids.npz', centroids=centroids, centroid_labels=labels, covariance=centroids_covariance)
else:
np.savez(f'{self.save_dir}/centroids.npz', centroids=centroids, centroid_labels=labels)
# -- label entropy per cluster
labels_without_zeros = labels[labels.sum(-1) > 0]
prob_labels = labels_without_zeros / labels_without_zeros.sum(-1)[:, None]
entropy_clusters = entropy(prob_labels, axis=1)
run.log('entropy_label', entropy_clusters.mean())
class UpdateCluster(tf.keras.callbacks.Callback):
"""
A callback class for updating centroid coordinates
"""
def __init__(self):
super().__init__()
def on_epoch_end(self, epoch, logs={}):
tf.cond(self.model.clustering.initialized, self.model.clustering.reset_centroids, lambda: None)
ch_index = self.model.clustering.compute_calinski_harabasz()
db_index = self.model.clustering.compute_davies_bouldin()
db_labels_index = self.model.clustering.compute_davies_bouldin_labels()
run.log('Calinski-Harabasz Index', float(ch_index))
run.log('Davies-Bouldin Index', float(db_index))
run.log('Davies-Bouldin Labels-Based Index', float(db_labels_index))
def get_data_from_tfrecords(args, num_replicas):
"""
Create a tf.data from tf records in args.train_dir/args.validation_dir
:param args:
:param num_replicas:
:return:
"""
num_frames = args.num_frames
num_mel = args.num_mel
num_labels = args.num_labels
batch_size = args.batch_size * num_replicas
autotune = tf.data.AUTOTUNE
train_filenames = tf.io.gfile.glob(f'{args.train_dir}/*.tfrec')
train_dataset = tf.data.TFRecordDataset(train_filenames, num_parallel_reads=autotune) \
.map(lambda example: parse_tfrecord(example,
num_mel=num_mel,
num_frames=num_frames,
snr=args.snr,
labels=args.labels),
num_parallel_calls=autotune) \
.map(lambda example: resize(example, num_frames=num_frames,
num_mel=num_mel,
num_labels=args.num_labels,
labels=args.labels,
snr=args.snr),
num_parallel_calls=autotune) \
.shuffle(10 * batch_size) \
.batch(batch_size) \
.prefetch(autotune) \
.cache()
return train_dataset
def get_model(args, num_replicas):
"""
Construct tensorflow model from checkpoint in args.path_model_tf
and data loader from args.data_dir
"""
model = globals()[args.model_name](nclass=args.num_labels)
if args.path_model_tf is not None:
model.load_weights(tf.train.latest_checkpoint(args.path_model_tf)).expect_partial()
cluster_algorithm = globals()[args.clustering_name](args.num_clusters, args.embed_dim)
clus = ClusterFeatureMap(cluster_algorithm, model, batch_size=args.batch_size * num_replicas)
clus.compile()
print('Compiling model done')
return clus
def train(args):
"""
Iterate over the batch in the dataset and learn the cluster centers
using args.clustering_name and args.model_name feature map.
:param args:
:return:
"""
if run._run_id.startswith("OfflineRun"):
run.number = 0
strategy = tf.distribute.MirroredStrategy()
save_dir = args.save_dir
save_dir = f'{save_dir}/{args.experiment_name}_{run.number}'
os.makedirs(save_dir, exist_ok=True)
with strategy.scope():
model = get_model(args, strategy.num_replicas_in_sync)
train_loader = get_data_from_tfrecords(args, strategy.num_replicas_in_sync)
model.fit(train_loader,
epochs=args.num_epochs,
callbacks=[SaveCluster(save_dir), UpdateCluster()])
|
from django.db import models
class Stat (models.Model):
name = models.CharField(max_length=15, primary_key=True)
def __str__(self):
return self.name
class Pokemon (models.Model):
name = models.CharField(max_length=30, primary_key=True)
id = models.PositiveIntegerField(unique=True)
height = models.PositiveIntegerField()
weight = models.PositiveIntegerField()
stats = models.ManyToManyField(Stat, through='PokemonStat')
def __str__(self):
return "Nombre: %s\nID: %d\nAltura: %d\nPeso: %d" % (self.name, self.id, self.height, self.weight)
class PokemonStat(models.Model):
pokemon_name = models.ForeignKey(Pokemon, on_delete=models.CASCADE)
stat = models.ForeignKey(Stat, on_delete=models.CASCADE)
base = models.PositiveIntegerField()
def __str__(self):
return "Pokémon: %s\nEstadistica: %s\n Valor base: %d" % (self.pokemon_name.name, self.stat, self.base)
class EvolutionChain(models.Model):
id = models.PositiveIntegerField(primary_key=True)
pokemons = models.ManyToManyField(Pokemon, through="PokemonEvolution")
def __str__(self):
return "Cadena de Evolución: %d\n Pokémon: %s\n" % (self.id, self.pokemons)
class PokemonEvolution(models.Model):
pokemon = models.ForeignKey(Pokemon, on_delete=models.CASCADE)
evolution_chain = models.ForeignKey(EvolutionChain, on_delete=models.CASCADE)
position = models.PositiveSmallIntegerField()
def __str__(self):
return "Cadena de Evolución: %d\n Pokémon: %s\n Posición: %d" % (self.evolution_chain.id, self.pokemon,
self.position)
|
"""Handlers for the app's v1 REST API."""
from __future__ import annotations
from typing import List
from fastapi import APIRouter, Depends
from semaphore.broadcast.repository import BroadcastMessageRepository
from semaphore.config import config
from semaphore.dependencies.broadcastrepo import broadcast_repo_dependency
from .models import BroadcastMessageModel
router = APIRouter(prefix=f"/{config.name}/v1")
"""FastAPI router for all v1 REST API endpoints."""
@router.get(
"/broadcasts",
summary="Get broadcasts",
description="List broadcast messages.",
response_model=List[BroadcastMessageModel],
tags=["broadcasts"],
)
def get_broadcasts(
broadcast_repo: BroadcastMessageRepository = Depends(
broadcast_repo_dependency
),
) -> List[BroadcastMessageModel]:
return [
BroadcastMessageModel.from_broadcast_message(m)
for m in broadcast_repo.iter_active()
]
|
import os
if os.environ['CONFIGURATION'] == 'dev':
DB_URL = "postgresql://postgres:@postgres/priceticker"
elif os.environ['CONFIGURATION'] == 'prod':
DB_URL = "postgresql://postgres:@postgis/priceticker"
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
},
'verbose': {
'format': '%(levelname)s (%(name)s) %(asctime)s\n%(pathname)s:%(lineno)d in %(funcName)s\n%(message)s\n'
},
},
'handlers': {
'sentry': {
'level': 'WARNING',
'class': 'raven.handlers.logging.SentryHandler',
'dsn': os.environ['SENTRY_DSN'],
},
'papertrail': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'formatter': 'simple',
'address': ('logs2.papertrailapp.com', 58442),
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console', 'sentry'],
}
}
|
import contextlib
import itertools
import math
import os.path
import pickle
import shutil
import sys
import tempfile
import warnings
from contextlib import ExitStack
from io import BytesIO
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pandas.errors import OutOfBoundsDatetime
import xarray as xr
from xarray import (
DataArray,
Dataset,
backends,
load_dataarray,
load_dataset,
open_dataarray,
open_dataset,
open_mfdataset,
save_mfdataset,
)
from xarray.backends.common import robust_getitem
from xarray.backends.netcdf3 import _nc3_dtype_coercions
from xarray.backends.netCDF4_ import _extract_nc4_variable_encoding
from xarray.backends.pydap_ import PydapDataStore
from xarray.coding.variables import SerializationWarning
from xarray.conventions import encode_dataset_coordinates
from xarray.core import indexing
from xarray.core.options import set_options
from xarray.core.pycompat import dask_array_type
from xarray.tests import LooseVersion, mock
from . import (
arm_xfail,
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
has_dask,
has_netCDF4,
has_scipy,
network,
raises_regex,
requires_cfgrib,
requires_cftime,
requires_dask,
requires_h5netcdf,
requires_netCDF4,
requires_pseudonetcdf,
requires_pydap,
requires_pynio,
requires_rasterio,
requires_scipy,
requires_scipy_or_netCDF4,
requires_zarr,
)
from .test_coding_times import (
_ALL_CALENDARS,
_NON_STANDARD_CALENDARS,
_STANDARD_CALENDARS,
)
from .test_dataset import create_append_test_data, create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask
import dask.array as da
dask_version = dask.__version__
except ImportError:
# needed for xfailed tests when dask < 2.4.0
# remove when min dask > 2.4.0
dask_version = "10.0"
ON_WINDOWS = sys.platform == "win32"
default_value = object()
def open_example_dataset(name, *args, **kwargs):
return open_dataset(
os.path.join(os.path.dirname(__file__), "data", name), *args, **kwargs
)
def open_example_mfdataset(names, *args, **kwargs):
return open_mfdataset(
[os.path.join(os.path.dirname(__file__), "data", name) for name in names],
*args,
**kwargs,
)
def create_masked_and_scaled_data():
x = np.array([np.nan, np.nan, 10, 10.1, 10.2], dtype=np.float32)
encoding = {
"_FillValue": -1,
"add_offset": 10,
"scale_factor": np.float32(0.1),
"dtype": "i2",
}
return Dataset({"x": ("t", x, {}, encoding)})
def create_encoded_masked_and_scaled_data():
attributes = {"_FillValue": -1, "add_offset": 10, "scale_factor": np.float32(0.1)}
return Dataset({"x": ("t", np.int16([-1, -1, 0, 1, 2]), attributes)})
def create_unsigned_masked_scaled_data():
encoding = {
"_FillValue": 255,
"_Unsigned": "true",
"dtype": "i1",
"add_offset": 10,
"scale_factor": np.float32(0.1),
}
x = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=np.float32)
return Dataset({"x": ("t", x, {}, encoding)})
def create_encoded_unsigned_masked_scaled_data():
# These are values as written to the file: the _FillValue will
# be represented in the signed form.
attributes = {
"_FillValue": -1,
"_Unsigned": "true",
"add_offset": 10,
"scale_factor": np.float32(0.1),
}
# Create unsigned data corresponding to [0, 1, 127, 128, 255] unsigned
sb = np.asarray([0, 1, 127, -128, -1], dtype="i1")
return Dataset({"x": ("t", sb, attributes)})
def create_bad_unsigned_masked_scaled_data():
encoding = {
"_FillValue": 255,
"_Unsigned": True,
"dtype": "i1",
"add_offset": 10,
"scale_factor": np.float32(0.1),
}
x = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=np.float32)
return Dataset({"x": ("t", x, {}, encoding)})
def create_bad_encoded_unsigned_masked_scaled_data():
# These are values as written to the file: the _FillValue will
# be represented in the signed form.
attributes = {
"_FillValue": -1,
"_Unsigned": True,
"add_offset": 10,
"scale_factor": np.float32(0.1),
}
# Create signed data corresponding to [0, 1, 127, 128, 255] unsigned
sb = np.asarray([0, 1, 127, -128, -1], dtype="i1")
return Dataset({"x": ("t", sb, attributes)})
def create_signed_masked_scaled_data():
encoding = {
"_FillValue": -127,
"_Unsigned": "false",
"dtype": "i1",
"add_offset": 10,
"scale_factor": np.float32(0.1),
}
x = np.array([-1.0, 10.1, 22.7, np.nan], dtype=np.float32)
return Dataset({"x": ("t", x, {}, encoding)})
def create_encoded_signed_masked_scaled_data():
# These are values as written to the file: the _FillValue will
# be represented in the signed form.
attributes = {
"_FillValue": -127,
"_Unsigned": "false",
"add_offset": 10,
"scale_factor": np.float32(0.1),
}
# Create signed data corresponding to [0, 1, 127, 128, 255] unsigned
sb = np.asarray([-110, 1, 127, -127], dtype="i1")
return Dataset({"x": ("t", sb, attributes)})
def create_boolean_data():
attributes = {"units": "-"}
return Dataset({"x": ("t", [True, False, False, True], attributes)})
class TestCommon:
def test_robust_getitem(self):
class UnreliableArrayFailure(Exception):
pass
class UnreliableArray:
def __init__(self, array, failures=1):
self.array = array
self.failures = failures
def __getitem__(self, key):
if self.failures > 0:
self.failures -= 1
raise UnreliableArrayFailure
return self.array[key]
array = UnreliableArray([0])
with pytest.raises(UnreliableArrayFailure):
array[0]
assert array[0] == 0
actual = robust_getitem(array, 0, catch=UnreliableArrayFailure, initial_delay=0)
assert actual == 0
class NetCDF3Only:
netcdf3_formats = ("NETCDF3_CLASSIC", "NETCDF3_64BIT")
@requires_scipy
def test_dtype_coercion_error(self):
"""Failing dtype coercion should lead to an error"""
for dtype, format in itertools.product(
_nc3_dtype_coercions, self.netcdf3_formats
):
if dtype == "bool":
# coerced upcast (bool to int8) ==> can never fail
continue
# Using the largest representable value, create some data that will
# no longer compare equal after the coerced downcast
maxval = np.iinfo(dtype).max
x = np.array([0, 1, 2, maxval], dtype=dtype)
ds = Dataset({"x": ("t", x, {})})
with create_tmp_file(allow_cleanup_failure=False) as path:
with pytest.raises(ValueError, match="could not safely cast"):
ds.to_netcdf(path, format=format)
class DatasetIOBase:
engine: Optional[str] = None
file_format: Optional[str] = None
def create_store(self):
raise NotImplementedError()
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path:
self.save(data, path, **save_kwargs)
with self.open(path, **open_kwargs) as ds:
yield ds
@contextlib.contextmanager
def roundtrip_append(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path:
for i, key in enumerate(data.variables):
mode = "a" if i > 0 else "w"
self.save(data[[key]], path, mode=mode, **save_kwargs)
with self.open(path, **open_kwargs) as ds:
yield ds
    # The save/open methods may be overridden below
def save(self, dataset, path, **kwargs):
return dataset.to_netcdf(
path, engine=self.engine, format=self.file_format, **kwargs
)
@contextlib.contextmanager
def open(self, path, **kwargs):
with open_dataset(path, engine=self.engine, **kwargs) as ds:
yield ds
def test_zero_dimensional_variable(self):
expected = create_test_data()
expected["float_var"] = ([], 1.0e9, {"units": "units of awesome"})
expected["bytes_var"] = ([], b"foobar")
expected["string_var"] = ([], "foobar")
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_write_store(self):
expected = create_test_data()
with self.create_store() as store:
expected.dump_to_store(store)
# we need to cf decode the store because it has time and
# non-dimension coordinates
with xr.decode_cf(store) as actual:
assert_allclose(expected, actual)
def check_dtypes_roundtripped(self, expected, actual):
for k in expected.variables:
expected_dtype = expected.variables[k].dtype
# For NetCDF3, the backend should perform dtype coercion
if (
isinstance(self, NetCDF3Only)
and str(expected_dtype) in _nc3_dtype_coercions
):
expected_dtype = np.dtype(_nc3_dtype_coercions[str(expected_dtype)])
actual_dtype = actual.variables[k].dtype
# TODO: check expected behavior for string dtypes more carefully
string_kinds = {"O", "S", "U"}
assert expected_dtype == actual_dtype or (
expected_dtype.kind in string_kinds
and actual_dtype.kind in string_kinds
)
def test_roundtrip_test_data(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
self.check_dtypes_roundtripped(expected, actual)
assert_identical(expected, actual)
def test_load(self):
expected = create_test_data()
@contextlib.contextmanager
def assert_loads(vars=None):
if vars is None:
vars = expected
with self.roundtrip(expected) as actual:
for k, v in actual.variables.items():
# IndexVariables are eagerly loaded into memory
assert v._in_memory == (k in actual.dims)
yield actual
for k, v in actual.variables.items():
if k in vars:
assert v._in_memory
assert_identical(expected, actual)
with pytest.raises(AssertionError):
# make sure the contextmanager works!
with assert_loads() as ds:
pass
with assert_loads() as ds:
ds.load()
with assert_loads(["var1", "dim1", "dim2"]) as ds:
ds["var1"].load()
# verify we can read data even after closing the file
with self.roundtrip(expected) as ds:
actual = ds.load()
assert_identical(expected, actual)
def test_dataset_compute(self):
expected = create_test_data()
with self.roundtrip(expected) as actual:
# Test Dataset.compute()
for k, v in actual.variables.items():
# IndexVariables are eagerly cached
assert v._in_memory == (k in actual.dims)
computed = actual.compute()
for k, v in actual.variables.items():
assert v._in_memory == (k in actual.dims)
for v in computed.variables.values():
assert v._in_memory
assert_identical(expected, actual)
assert_identical(expected, computed)
def test_pickle(self):
if not has_dask:
pytest.xfail("pickling requires dask for SerializableLock")
expected = Dataset({"foo": ("x", [42])})
with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
with roundtripped:
# Windows doesn't like reopening an already open file
raw_pickle = pickle.dumps(roundtripped)
with pickle.loads(raw_pickle) as unpickled_ds:
assert_identical(expected, unpickled_ds)
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_pickle_dataarray(self):
if not has_dask:
pytest.xfail("pickling requires dask for SerializableLock")
expected = Dataset({"foo": ("x", [42])})
with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
with roundtripped:
raw_pickle = pickle.dumps(roundtripped["foo"])
# TODO: figure out how to explicitly close the file for the
# unpickled DataArray?
unpickled = pickle.loads(raw_pickle)
assert_identical(expected["foo"], unpickled)
def test_dataset_caching(self):
expected = Dataset({"foo": ("x", [5, 6, 7])})
with self.roundtrip(expected) as actual:
assert isinstance(actual.foo.variable._data, indexing.MemoryCachedArray)
assert not actual.foo.variable._in_memory
actual.foo.values # cache
assert actual.foo.variable._in_memory
with self.roundtrip(expected, open_kwargs={"cache": False}) as actual:
assert isinstance(actual.foo.variable._data, indexing.CopyOnWriteArray)
assert not actual.foo.variable._in_memory
actual.foo.values # no caching
assert not actual.foo.variable._in_memory
def test_roundtrip_None_variable(self):
expected = Dataset({None: (("x", "y"), [[0, 1], [2, 3]])})
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_roundtrip_object_dtype(self):
floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
bytes_ = np.array([b"ab", b"cdef", b"g"], dtype=object)
bytes_nans = np.array([b"ab", b"cdef", np.nan], dtype=object)
strings = np.array(["ab", "cdef", "g"], dtype=object)
strings_nans = np.array(["ab", "cdef", np.nan], dtype=object)
all_nans = np.array([np.nan, np.nan], dtype=object)
original = Dataset(
{
"floats": ("a", floats),
"floats_nans": ("a", floats_nans),
"bytes": ("b", bytes_),
"bytes_nans": ("b", bytes_nans),
"strings": ("b", strings),
"strings_nans": ("b", strings_nans),
"all_nans": ("c", all_nans),
"nan": ([], np.nan),
}
)
expected = original.copy(deep=True)
with self.roundtrip(original) as actual:
try:
assert_identical(expected, actual)
except AssertionError:
# Most stores use '' for nans in strings, but some don't.
                # First try the ideal case (where the store returns exactly
                # the original Dataset), then try a more realistic case.
# This currently includes all netCDF files when encoding is not
# explicitly set.
# https://github.com/pydata/xarray/issues/1647
expected["bytes_nans"][-1] = b""
expected["strings_nans"][-1] = ""
assert_identical(expected, actual)
def test_roundtrip_string_data(self):
expected = Dataset({"x": ("t", ["ab", "cdef"])})
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_roundtrip_string_encoded_characters(self):
expected = Dataset({"x": ("t", ["ab", "cdef"])})
expected["x"].encoding["dtype"] = "S1"
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
assert actual["x"].encoding["_Encoding"] == "utf-8"
expected["x"].encoding["_Encoding"] = "ascii"
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
assert actual["x"].encoding["_Encoding"] == "ascii"
@arm_xfail
def test_roundtrip_numpy_datetime_data(self):
times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"])
expected = Dataset({"t": ("t", times), "t0": times[0]})
kwargs = {"encoding": {"t0": {"units": "days since 1950-01-01"}}}
with self.roundtrip(expected, save_kwargs=kwargs) as actual:
assert_identical(expected, actual)
assert actual.t0.encoding["units"] == "days since 1950-01-01"
@requires_cftime
def test_roundtrip_cftime_datetime_data(self):
from .test_coding_times import _all_cftime_date_types
date_types = _all_cftime_date_types()
for date_type in date_types.values():
times = [date_type(1, 1, 1), date_type(1, 1, 2)]
expected = Dataset({"t": ("t", times), "t0": times[0]})
kwargs = {"encoding": {"t0": {"units": "days since 0001-01-01"}}}
expected_decoded_t = np.array(times)
expected_decoded_t0 = np.array([date_type(1, 1, 1)])
expected_calendar = times[0].calendar
with warnings.catch_warnings():
if expected_calendar in {"proleptic_gregorian", "gregorian"}:
warnings.filterwarnings("ignore", "Unable to decode time axis")
with self.roundtrip(expected, save_kwargs=kwargs) as actual:
abs_diff = abs(actual.t.values - expected_decoded_t)
assert (abs_diff <= np.timedelta64(1, "s")).all()
assert (
actual.t.encoding["units"]
== "days since 0001-01-01 00:00:00.000000"
)
assert actual.t.encoding["calendar"] == expected_calendar
abs_diff = abs(actual.t0.values - expected_decoded_t0)
assert (abs_diff <= np.timedelta64(1, "s")).all()
assert actual.t0.encoding["units"] == "days since 0001-01-01"
assert actual.t.encoding["calendar"] == expected_calendar
def test_roundtrip_timedelta_data(self):
time_deltas = pd.to_timedelta(["1h", "2h", "NaT"])
expected = Dataset({"td": ("td", time_deltas), "td0": time_deltas[0]})
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_roundtrip_float64_data(self):
expected = Dataset({"x": ("y", np.array([1.0, 2.0, np.pi], dtype="float64"))})
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_roundtrip_example_1_netcdf(self):
with open_example_dataset("example_1.nc") as expected:
with self.roundtrip(expected) as actual:
# we allow the attributes to differ since that
# will depend on the encoding used. For example,
# without CF encoding 'actual' will end up with
# a dtype attribute.
assert_equal(expected, actual)
def test_roundtrip_coordinates(self):
original = Dataset(
{"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])}
)
with self.roundtrip(original) as actual:
assert_identical(original, actual)
original["foo"].encoding["coordinates"] = "y"
with self.roundtrip(original, open_kwargs={"decode_coords": False}) as expected:
# check roundtripping when decode_coords=False
with self.roundtrip(
expected, open_kwargs={"decode_coords": False}
) as actual:
assert_identical(expected, actual)
def test_roundtrip_global_coordinates(self):
original = Dataset(
{"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])}
)
with self.roundtrip(original) as actual:
assert_identical(original, actual)
# test that global "coordinates" is as expected
_, attrs = encode_dataset_coordinates(original)
assert attrs["coordinates"] == "y"
# test warning when global "coordinates" is already set
original.attrs["coordinates"] = "foo"
with pytest.warns(SerializationWarning):
_, attrs = encode_dataset_coordinates(original)
assert attrs["coordinates"] == "foo"
def test_roundtrip_coordinates_with_space(self):
original = Dataset(coords={"x": 0, "y z": 1})
expected = Dataset({"y z": 1}, {"x": 0})
with pytest.warns(SerializationWarning):
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
def test_roundtrip_boolean_dtype(self):
original = create_boolean_data()
assert original["x"].dtype == "bool"
with self.roundtrip(original) as actual:
assert_identical(original, actual)
assert actual["x"].dtype == "bool"
def test_orthogonal_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
# make sure the array is not yet loaded into memory
assert not actual["var1"].variable._in_memory
assert_identical(expected, actual)
            # do it twice to make sure we've switched from orthogonal -> numpy
            # indexing once the values have been cached
actual = on_disk.isel(**indexers)
assert_identical(expected, actual)
@pytest.mark.xfail(
not has_dask,
reason="the code for indexing without dask handles negative steps in slices incorrectly",
)
def test_vectorized_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
indexers = {
"dim1": DataArray([0, 2, 0], dims="a"),
"dim2": DataArray([0, 2, 3], dims="a"),
}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
# make sure the array is not yet loaded into memory
assert not actual["var1"].variable._in_memory
assert_identical(expected, actual.load())
            # do it twice to make sure we've switched from
            # vectorized -> numpy indexing once the values have been cached
actual = on_disk.isel(**indexers)
assert_identical(expected, actual)
def multiple_indexing(indexers):
            # make sure a sequence of lazy indexing operations works.
with self.roundtrip(in_memory) as on_disk:
actual = on_disk["var3"]
expected = in_memory["var3"]
for ind in indexers:
actual = actual.isel(**ind)
expected = expected.isel(**ind)
# make sure the array is not yet loaded into memory
assert not actual.variable._in_memory
assert_identical(expected, actual.load())
# two-staged vectorized-indexing
indexers = [
{
"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
"dim3": DataArray([[0, 4], [1, 3], [2, 2]], dims=["a", "b"]),
},
{"a": DataArray([0, 1], dims=["c"]), "b": DataArray([0, 1], dims=["c"])},
]
multiple_indexing(indexers)
# vectorized-slice mixed
indexers = [
{
"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
"dim3": slice(None, 10),
}
]
multiple_indexing(indexers)
# vectorized-integer mixed
indexers = [
{"dim3": 0},
{"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])},
{"a": slice(None, None, 2)},
]
multiple_indexing(indexers)
# vectorized-integer mixed
indexers = [
{"dim3": 0},
{"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])},
{"a": 1, "b": 0},
]
multiple_indexing(indexers)
# with negative step slice.
indexers = [
{
"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
"dim3": slice(-1, 1, -1),
}
]
multiple_indexing(indexers)
# with negative step slice.
indexers = [
{
"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
"dim3": slice(-1, 1, -2),
}
]
multiple_indexing(indexers)
def test_isel_dataarray(self):
# Make sure isel works lazily. GH:issue:1688
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
expected = in_memory.isel(dim2=in_memory["dim2"] < 3)
actual = on_disk.isel(dim2=on_disk["dim2"] < 3)
assert_identical(expected, actual)
def validate_array_type(self, ds):
# Make sure that only NumpyIndexingAdapter stores a bare np.ndarray.
def find_and_validate_array(obj):
# recursively called function. obj: array or array wrapper.
if hasattr(obj, "array"):
if isinstance(obj.array, indexing.ExplicitlyIndexed):
find_and_validate_array(obj.array)
else:
if isinstance(obj.array, np.ndarray):
assert isinstance(obj, indexing.NumpyIndexingAdapter)
elif isinstance(obj.array, dask_array_type):
assert isinstance(obj, indexing.DaskIndexingAdapter)
elif isinstance(obj.array, pd.Index):
assert isinstance(obj, indexing.PandasIndexAdapter)
else:
raise TypeError(
"{} is wrapped by {}".format(type(obj.array), type(obj))
)
for k, v in ds.variables.items():
find_and_validate_array(v._data)
def test_array_type_after_indexing(self):
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
self.validate_array_type(on_disk)
indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)}
expected = in_memory.isel(**indexers)
actual = on_disk.isel(**indexers)
assert_identical(expected, actual)
self.validate_array_type(actual)
            # do it twice to make sure we've switched from orthogonal -> numpy
            # indexing once the values have been cached
actual = on_disk.isel(**indexers)
assert_identical(expected, actual)
self.validate_array_type(actual)
def test_dropna(self):
# regression test for GH:issue:1694
a = np.random.randn(4, 3)
a[1, 1] = np.NaN
in_memory = xr.Dataset(
{"a": (("y", "x"), a)}, coords={"y": np.arange(4), "x": np.arange(3)}
)
assert_identical(
in_memory.dropna(dim="x"), in_memory.isel(x=slice(None, None, 2))
)
with self.roundtrip(in_memory) as on_disk:
self.validate_array_type(on_disk)
expected = in_memory.dropna(dim="x")
actual = on_disk.dropna(dim="x")
assert_identical(expected, actual)
def test_ondisk_after_print(self):
""" Make sure print does not load file into memory """
in_memory = create_test_data()
with self.roundtrip(in_memory) as on_disk:
repr(on_disk)
assert not on_disk["var1"]._in_memory
class CFEncodedBase(DatasetIOBase):
def test_roundtrip_bytes_with_fill_value(self):
values = np.array([b"ab", b"cdef", np.nan], dtype=object)
encoding = {"_FillValue": b"X", "dtype": "S1"}
original = Dataset({"x": ("t", values, {}, encoding)})
expected = original.copy(deep=True)
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
original = Dataset({"x": ("t", values, {}, {"_FillValue": b""})})
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
def test_roundtrip_string_with_fill_value_nchar(self):
values = np.array(["ab", "cdef", np.nan], dtype=object)
expected = Dataset({"x": ("t", values)})
encoding = {"dtype": "S1", "_FillValue": b"X"}
original = Dataset({"x": ("t", values, {}, encoding)})
# Not supported yet.
with pytest.raises(NotImplementedError):
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
@pytest.mark.parametrize(
"decoded_fn, encoded_fn",
[
(
create_unsigned_masked_scaled_data,
create_encoded_unsigned_masked_scaled_data,
),
pytest.param(
create_bad_unsigned_masked_scaled_data,
create_bad_encoded_unsigned_masked_scaled_data,
marks=pytest.mark.xfail(reason="Bad _Unsigned attribute."),
),
(
create_signed_masked_scaled_data,
create_encoded_signed_masked_scaled_data,
),
(create_masked_and_scaled_data, create_encoded_masked_and_scaled_data),
],
)
def test_roundtrip_mask_and_scale(self, decoded_fn, encoded_fn):
decoded = decoded_fn()
encoded = encoded_fn()
with self.roundtrip(decoded) as actual:
for k in decoded.variables:
assert decoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(decoded, actual, decode_bytes=False)
with self.roundtrip(decoded, open_kwargs=dict(decode_cf=False)) as actual:
# TODO: this assumes that all roundtrips will first
# encode. Is that something we want to test for?
for k in encoded.variables:
assert encoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(encoded, actual, decode_bytes=False)
with self.roundtrip(encoded, open_kwargs=dict(decode_cf=False)) as actual:
for k in encoded.variables:
assert encoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(encoded, actual, decode_bytes=False)
# make sure roundtrip encoding didn't change the
# original dataset.
assert_allclose(encoded, encoded_fn(), decode_bytes=False)
with self.roundtrip(encoded) as actual:
for k in decoded.variables:
assert decoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(decoded, actual, decode_bytes=False)
def test_coordinates_encoding(self):
def equals_latlon(obj):
return obj == "lat lon" or obj == "lon lat"
original = Dataset(
{"temp": ("x", [0, 1]), "precip": ("x", [0, -1])},
{"lat": ("x", [2, 3]), "lon": ("x", [4, 5])},
)
with self.roundtrip(original) as actual:
assert_identical(actual, original)
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
assert equals_latlon(ds["temp"].attrs["coordinates"])
assert equals_latlon(ds["precip"].attrs["coordinates"])
assert "coordinates" not in ds.attrs
assert "coordinates" not in ds["lat"].attrs
assert "coordinates" not in ds["lon"].attrs
modified = original.drop_vars(["temp", "precip"])
with self.roundtrip(modified) as actual:
assert_identical(actual, modified)
with create_tmp_file() as tmp_file:
modified.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
assert equals_latlon(ds.attrs["coordinates"])
assert "coordinates" not in ds["lat"].attrs
assert "coordinates" not in ds["lon"].attrs
original["temp"].encoding["coordinates"] = "lat"
with self.roundtrip(original) as actual:
assert_identical(actual, original)
original["precip"].encoding["coordinates"] = "lat"
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=True) as ds:
assert "lon" not in ds["temp"].encoding["coordinates"]
assert "lon" not in ds["precip"].encoding["coordinates"]
assert "coordinates" not in ds["lat"].encoding
assert "coordinates" not in ds["lon"].encoding
def test_roundtrip_endian(self):
ds = Dataset(
{
"x": np.arange(3, 10, dtype=">i2"),
"y": np.arange(3, 20, dtype="<i4"),
"z": np.arange(3, 30, dtype="=i8"),
"w": ("x", np.arange(3, 10, dtype=float)),
}
)
with self.roundtrip(ds) as actual:
            # technically these datasets are slightly different: one holds
            # mixed-endian data (ds), while the other should be all
            # big-endian (actual). assert_identical should still pass, though.
assert_identical(ds, actual)
if self.engine == "netcdf4":
ds["z"].encoding["endian"] = "big"
with pytest.raises(NotImplementedError):
with self.roundtrip(ds) as actual:
pass
def test_invalid_dataarray_names_raise(self):
te = (TypeError, "string or None")
ve = (ValueError, "string must be length 1 or")
data = np.random.random((2, 2))
da = xr.DataArray(data)
for name, e in zip([0, (4, 5), True, ""], [te, te, te, ve]):
ds = Dataset({name: da})
with raises_regex(*e):
with self.roundtrip(ds):
pass
def test_encoding_kwarg(self):
ds = Dataset({"x": ("y", np.arange(10.0))})
kwargs = dict(encoding={"x": {"dtype": "f4"}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
encoded_dtype = actual.x.encoding["dtype"]
# On OS X, dtype sometimes switches endianness for unclear reasons
assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4
assert ds.x.encoding == {}
kwargs = dict(encoding={"x": {"foo": "bar"}})
with raises_regex(ValueError, "unexpected encoding"):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
kwargs = dict(encoding={"x": "foo"})
with raises_regex(ValueError, "must be castable"):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
kwargs = dict(encoding={"invalid": {}})
with pytest.raises(KeyError):
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
pass
def test_encoding_kwarg_dates(self):
ds = Dataset({"t": pd.date_range("2000-01-01", periods=3)})
units = "days since 1900-01-01"
kwargs = dict(encoding={"t": {"units": units}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual.t.encoding["units"] == units
assert_identical(actual, ds)
def test_encoding_kwarg_fixed_width_string(self):
# regression test for GH2149
for strings in [[b"foo", b"bar", b"baz"], ["foo", "bar", "baz"]]:
ds = Dataset({"x": strings})
kwargs = dict(encoding={"x": {"dtype": "S1"}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual["x"].encoding["dtype"] == "S1"
assert_identical(actual, ds)
def test_default_fill_value(self):
# Test default encoding for float:
ds = Dataset({"x": ("y", np.arange(10.0))})
kwargs = dict(encoding={"x": {"dtype": "f4"}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert math.isnan(actual.x.encoding["_FillValue"])
assert ds.x.encoding == {}
# Test default encoding for int:
ds = Dataset({"x": ("y", np.arange(10.0))})
kwargs = dict(encoding={"x": {"dtype": "int16"}})
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".*floating point data as an integer")
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert "_FillValue" not in actual.x.encoding
assert ds.x.encoding == {}
# Test default encoding for implicit int:
ds = Dataset({"x": ("y", np.arange(10, dtype="int16"))})
with self.roundtrip(ds) as actual:
assert "_FillValue" not in actual.x.encoding
assert ds.x.encoding == {}
def test_explicitly_omit_fill_value(self):
ds = Dataset({"x": ("y", [np.pi, -np.pi])})
ds.x.encoding["_FillValue"] = None
with self.roundtrip(ds) as actual:
assert "_FillValue" not in actual.x.encoding
def test_explicitly_omit_fill_value_via_encoding_kwarg(self):
ds = Dataset({"x": ("y", [np.pi, -np.pi])})
kwargs = dict(encoding={"x": {"_FillValue": None}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert "_FillValue" not in actual.x.encoding
assert ds.y.encoding == {}
def test_explicitly_omit_fill_value_in_coord(self):
ds = Dataset({"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]})
ds.y.encoding["_FillValue"] = None
with self.roundtrip(ds) as actual:
assert "_FillValue" not in actual.y.encoding
def test_explicitly_omit_fill_value_in_coord_via_encoding_kwarg(self):
ds = Dataset({"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]})
kwargs = dict(encoding={"y": {"_FillValue": None}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert "_FillValue" not in actual.y.encoding
assert ds.y.encoding == {}
def test_encoding_same_dtype(self):
ds = Dataset({"x": ("y", np.arange(10.0, dtype="f4"))})
kwargs = dict(encoding={"x": {"dtype": "f4"}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
encoded_dtype = actual.x.encoding["dtype"]
# On OS X, dtype sometimes switches endianness for unclear reasons
assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4
assert ds.x.encoding == {}
def test_append_write(self):
# regression for GH1215
data = create_test_data()
with self.roundtrip_append(data) as actual:
assert_identical(data, actual)
def test_append_overwrite_values(self):
# regression for GH1215
data = create_test_data()
with create_tmp_file(allow_cleanup_failure=False) as tmp_file:
self.save(data, tmp_file, mode="w")
data["var2"][:] = -999
data["var9"] = data["var2"] * 3
self.save(data[["var2", "var9"]], tmp_file, mode="a")
with self.open(tmp_file) as actual:
assert_identical(data, actual)
def test_append_with_invalid_dim_raises(self):
data = create_test_data()
with create_tmp_file(allow_cleanup_failure=False) as tmp_file:
self.save(data, tmp_file, mode="w")
data["var9"] = data["var2"] * 3
data = data.isel(dim1=slice(2, 6)) # modify one dimension
with raises_regex(
ValueError, "Unable to update size for existing dimension"
):
self.save(data, tmp_file, mode="a")
def test_multiindex_not_implemented(self):
ds = Dataset(coords={"y": ("x", [1, 2]), "z": ("x", ["a", "b"])}).set_index(
x=["y", "z"]
)
with raises_regex(NotImplementedError, "MultiIndex"):
with self.roundtrip(ds):
pass
_counter = itertools.count()
@contextlib.contextmanager
def create_tmp_file(suffix=".nc", allow_cleanup_failure=False):
temp_dir = tempfile.mkdtemp()
path = os.path.join(temp_dir, "temp-{}{}".format(next(_counter), suffix))
try:
yield path
finally:
try:
shutil.rmtree(temp_dir)
except OSError:
if not allow_cleanup_failure:
raise
@contextlib.contextmanager
def create_tmp_files(nfiles, suffix=".nc", allow_cleanup_failure=False):
with ExitStack() as stack:
files = [
stack.enter_context(create_tmp_file(suffix, allow_cleanup_failure))
for apath in np.arange(nfiles)
]
yield files
class NetCDF4Base(CFEncodedBase):
"""Tests for both netCDF4-python and h5netcdf."""
engine = "netcdf4"
def test_open_group(self):
# Create a netCDF file with a dataset stored within a group
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, "w") as rootgrp:
foogrp = rootgrp.createGroup("foo")
ds = foogrp
ds.createDimension("time", size=10)
x = np.arange(10)
ds.createVariable("x", np.int32, dimensions=("time",))
ds.variables["x"][:] = x
expected = Dataset()
expected["x"] = ("time", x)
# check equivalent ways to specify group
for group in "foo", "/foo", "foo/", "/foo/":
with self.open(tmp_file, group=group) as actual:
assert_equal(actual["x"], expected["x"])
# check that missing group raises appropriate exception
with pytest.raises(IOError):
open_dataset(tmp_file, group="bar")
with raises_regex(ValueError, "must be a string"):
open_dataset(tmp_file, group=(1, 2, 3))
def test_open_subgroup(self):
# Create a netCDF file with a dataset stored within a group within a
# group
with create_tmp_file() as tmp_file:
rootgrp = nc4.Dataset(tmp_file, "w")
foogrp = rootgrp.createGroup("foo")
bargrp = foogrp.createGroup("bar")
ds = bargrp
ds.createDimension("time", size=10)
x = np.arange(10)
ds.createVariable("x", np.int32, dimensions=("time",))
ds.variables["x"][:] = x
rootgrp.close()
expected = Dataset()
expected["x"] = ("time", x)
# check equivalent ways to specify group
for group in "foo/bar", "/foo/bar", "foo/bar/", "/foo/bar/":
with self.open(tmp_file, group=group) as actual:
assert_equal(actual["x"], expected["x"])
def test_write_groups(self):
data1 = create_test_data()
data2 = data1 * 2
with create_tmp_file() as tmp_file:
self.save(data1, tmp_file, group="data/1")
self.save(data2, tmp_file, group="data/2", mode="a")
with self.open(tmp_file, group="data/1") as actual1:
assert_identical(data1, actual1)
with self.open(tmp_file, group="data/2") as actual2:
assert_identical(data2, actual2)
def test_encoding_kwarg_vlen_string(self):
for input_strings in [[b"foo", b"bar", b"baz"], ["foo", "bar", "baz"]]:
original = Dataset({"x": input_strings})
expected = Dataset({"x": ["foo", "bar", "baz"]})
kwargs = dict(encoding={"x": {"dtype": str}})
with self.roundtrip(original, save_kwargs=kwargs) as actual:
assert actual["x"].encoding["dtype"] is str
assert_identical(actual, expected)
def test_roundtrip_string_with_fill_value_vlen(self):
values = np.array(["ab", "cdef", np.nan], dtype=object)
expected = Dataset({"x": ("t", values)})
# netCDF4-based backends don't support an explicit fillvalue
# for variable length strings yet.
# https://github.com/Unidata/netcdf4-python/issues/730
# https://github.com/shoyer/h5netcdf/issues/37
original = Dataset({"x": ("t", values, {}, {"_FillValue": "XXX"})})
with pytest.raises(NotImplementedError):
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
original = Dataset({"x": ("t", values, {}, {"_FillValue": ""})})
with pytest.raises(NotImplementedError):
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
def test_roundtrip_character_array(self):
with create_tmp_file() as tmp_file:
values = np.array([["a", "b", "c"], ["d", "e", "f"]], dtype="S")
with nc4.Dataset(tmp_file, mode="w") as nc:
nc.createDimension("x", 2)
nc.createDimension("string3", 3)
v = nc.createVariable("x", np.dtype("S1"), ("x", "string3"))
v[:] = values
values = np.array(["abc", "def"], dtype="S")
expected = Dataset({"x": ("x", values)})
with open_dataset(tmp_file) as actual:
assert_identical(expected, actual)
# regression test for #157
with self.roundtrip(actual) as roundtripped:
assert_identical(expected, roundtripped)
def test_default_to_char_arrays(self):
data = Dataset({"x": np.array(["foo", "zzzz"], dtype="S")})
with self.roundtrip(data) as actual:
assert_identical(data, actual)
assert actual["x"].dtype == np.dtype("S4")
def test_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, "w") as ds:
ds.createDimension("time", size=10)
ds.createVariable("time", np.int32, dimensions=("time",))
units = "days since 1999-01-01"
ds.variables["time"].setncattr("units", units)
ds.variables["time"][:] = np.arange(10) + 4
expected = Dataset()
time = pd.date_range("1999-01-05", periods=10)
encoding = {"units": units, "dtype": np.dtype("int32")}
expected["time"] = ("time", time, {}, encoding)
with open_dataset(tmp_file) as actual:
assert_equal(actual["time"], expected["time"])
actual_encoding = {
k: v
for k, v in actual["time"].encoding.items()
if k in expected["time"].encoding
}
assert actual_encoding == expected["time"].encoding
def test_dump_encodings(self):
# regression test for #709
ds = Dataset({"x": ("y", np.arange(10.0))})
kwargs = dict(encoding={"x": {"zlib": True}})
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual.x.encoding["zlib"]
def test_dump_and_open_encodings(self):
# Create a netCDF file with explicit time units
# and make sure it makes it into the encodings
# and survives a round trip
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, "w") as ds:
ds.createDimension("time", size=10)
ds.createVariable("time", np.int32, dimensions=("time",))
units = "days since 1999-01-01"
ds.variables["time"].setncattr("units", units)
ds.variables["time"][:] = np.arange(10) + 4
with open_dataset(tmp_file) as xarray_dataset:
with create_tmp_file() as tmp_file2:
xarray_dataset.to_netcdf(tmp_file2)
with nc4.Dataset(tmp_file2, "r") as ds:
assert ds.variables["time"].getncattr("units") == units
assert_array_equal(ds.variables["time"], np.arange(10) + 4)
def test_compression_encoding(self):
data = create_test_data()
data["var2"].encoding.update(
{
"zlib": True,
"chunksizes": (5, 5),
"fletcher32": True,
"shuffle": True,
"original_shape": data.var2.shape,
}
)
with self.roundtrip(data) as actual:
for k, v in data["var2"].encoding.items():
assert v == actual["var2"].encoding[k]
# regression test for #156
expected = data.isel(dim1=0)
with self.roundtrip(expected) as actual:
assert_equal(expected, actual)
def test_encoding_kwarg_compression(self):
ds = Dataset({"x": np.arange(10.0)})
encoding = dict(
dtype="f4",
zlib=True,
complevel=9,
fletcher32=True,
chunksizes=(5,),
shuffle=True,
)
kwargs = dict(encoding=dict(x=encoding))
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert_equal(actual, ds)
assert actual.x.encoding["dtype"] == "f4"
assert actual.x.encoding["zlib"]
assert actual.x.encoding["complevel"] == 9
assert actual.x.encoding["fletcher32"]
assert actual.x.encoding["chunksizes"] == (5,)
assert actual.x.encoding["shuffle"]
assert ds.x.encoding == {}
def test_keep_chunksizes_if_no_original_shape(self):
ds = Dataset({"x": [1, 2, 3]})
chunksizes = (2,)
ds.variables["x"].encoding = {"chunksizes": chunksizes}
with self.roundtrip(ds) as actual:
assert_identical(ds, actual)
assert_array_equal(
ds["x"].encoding["chunksizes"], actual["x"].encoding["chunksizes"]
)
def test_encoding_chunksizes_unlimited(self):
# regression test for GH1225
ds = Dataset({"x": [1, 2, 3], "y": ("x", [2, 3, 4])})
ds.variables["x"].encoding = {
"zlib": False,
"shuffle": False,
"complevel": 0,
"fletcher32": False,
"contiguous": False,
"chunksizes": (2 ** 20,),
"original_shape": (3,),
}
with self.roundtrip(ds) as actual:
assert_equal(ds, actual)
def test_mask_and_scale(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode="w") as nc:
nc.createDimension("t", 5)
nc.createVariable("x", "int16", ("t",), fill_value=-1)
v = nc.variables["x"]
v.set_auto_maskandscale(False)
v.add_offset = 10
v.scale_factor = 0.1
v[:] = np.array([-1, -1, 0, 1, 2])
# first make sure netCDF4 reads the masked and scaled data
# correctly
with nc4.Dataset(tmp_file, mode="r") as nc:
expected = np.ma.array(
[-1, -1, 10, 10.1, 10.2], mask=[True, True, False, False, False]
)
actual = nc.variables["x"][:]
assert_array_equal(expected, actual)
# now check xarray
with open_dataset(tmp_file) as ds:
expected = create_masked_and_scaled_data()
assert_identical(expected, ds)
def test_0dimensional_variable(self):
        # This test verifies our work-around for this netCDF4-python bug:
# https://github.com/Unidata/netcdf4-python/pull/220
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode="w") as nc:
v = nc.createVariable("x", "int16")
v[...] = 123
with open_dataset(tmp_file) as ds:
expected = Dataset({"x": ((), 123)})
assert_identical(expected, ds)
def test_read_variable_len_strings(self):
with create_tmp_file() as tmp_file:
values = np.array(["foo", "bar", "baz"], dtype=object)
with nc4.Dataset(tmp_file, mode="w") as nc:
nc.createDimension("x", 3)
v = nc.createVariable("x", str, ("x",))
v[:] = values
expected = Dataset({"x": ("x", values)})
for kwargs in [{}, {"decode_cf": True}]:
with open_dataset(tmp_file, **kwargs) as actual:
assert_identical(expected, actual)
def test_encoding_unlimited_dims(self):
ds = Dataset({"x": ("y", np.arange(10.0))})
with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
ds.encoding = {"unlimited_dims": ["y"]}
with self.roundtrip(ds) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
@requires_netCDF4
class TestNetCDF4Data(NetCDF4Base):
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(tmp_file, mode="w") as store:
yield store
def test_variable_order(self):
# doesn't work with scipy or h5py :(
ds = Dataset()
ds["a"] = 1
ds["z"] = 2
ds["b"] = 3
ds.coords["c"] = 4
with self.roundtrip(ds) as actual:
assert list(ds.variables) == list(actual.variables)
def test_unsorted_index_raises(self):
# should be fixed in netcdf4 v1.2.1
random_data = np.random.random(size=(4, 6))
dim0 = [0, 1, 2, 3]
dim1 = [0, 2, 1, 3, 5, 4] # We will sort this in a later step
da = xr.DataArray(
data=random_data,
dims=("dim0", "dim1"),
coords={"dim0": dim0, "dim1": dim1},
name="randovar",
)
ds = da.to_dataset()
with self.roundtrip(ds) as ondisk:
inds = np.argsort(dim1)
ds2 = ondisk.isel(dim1=inds)
# Older versions of NetCDF4 raise an exception here, and if so we
# want to ensure we improve (that is, replace) the error message
try:
ds2.randovar.values
except IndexError as err:
assert "first by calling .load" in str(err)
def test_setncattr_string(self):
list_of_strings = ["list", "of", "strings"]
one_element_list_of_strings = ["one element"]
one_string = "one string"
attrs = {
"foo": list_of_strings,
"bar": one_element_list_of_strings,
"baz": one_string,
}
ds = Dataset({"x": ("y", [1, 2, 3], attrs)}, attrs=attrs)
with self.roundtrip(ds) as actual:
for totest in [actual, actual["x"]]:
assert_array_equal(list_of_strings, totest.attrs["foo"])
assert_array_equal(one_element_list_of_strings, totest.attrs["bar"])
assert one_string == totest.attrs["baz"]
def test_autoclose_future_warning(self):
data = create_test_data()
with create_tmp_file() as tmp_file:
self.save(data, tmp_file)
with pytest.warns(FutureWarning):
with self.open(tmp_file, autoclose=True) as actual:
assert_identical(data, actual)
def test_already_open_dataset(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode="w") as nc:
v = nc.createVariable("x", "int")
v[...] = 42
nc = nc4.Dataset(tmp_file, mode="r")
store = backends.NetCDF4DataStore(nc)
with open_dataset(store) as ds:
expected = Dataset({"x": ((), 42)})
assert_identical(expected, ds)
def test_already_open_dataset_group(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode="w") as nc:
group = nc.createGroup("g")
v = group.createVariable("x", "int")
v[...] = 42
nc = nc4.Dataset(tmp_file, mode="r")
store = backends.NetCDF4DataStore(nc.groups["g"])
with open_dataset(store) as ds:
expected = Dataset({"x": ((), 42)})
assert_identical(expected, ds)
nc = nc4.Dataset(tmp_file, mode="r")
store = backends.NetCDF4DataStore(nc, group="g")
with open_dataset(store) as ds:
expected = Dataset({"x": ((), 42)})
assert_identical(expected, ds)
with nc4.Dataset(tmp_file, mode="r") as nc:
with pytest.raises(ValueError, match="must supply a root"):
backends.NetCDF4DataStore(nc.groups["g"], group="g")
@requires_netCDF4
@requires_dask
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
class TestNetCDF4ViaDaskData(TestNetCDF4Data):
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if open_kwargs is None:
open_kwargs = {}
if save_kwargs is None:
save_kwargs = {}
open_kwargs.setdefault("chunks", -1)
with TestNetCDF4Data.roundtrip(
self, data, save_kwargs, open_kwargs, allow_cleanup_failure
) as ds:
yield ds
def test_unsorted_index_raises(self):
        # Skip when using dask because dask rewrites indexers to getitem;
        # dask first pulls items by block.
pass
def test_dataset_caching(self):
# caching behavior differs for dask
pass
def test_write_inconsistent_chunks(self):
# Construct two variables with the same dimensions, but different
# chunk sizes.
x = da.zeros((100, 100), dtype="f4", chunks=(50, 100))
x = DataArray(data=x, dims=("lat", "lon"), name="x")
x.encoding["chunksizes"] = (50, 100)
x.encoding["original_shape"] = (100, 100)
y = da.ones((100, 100), dtype="f4", chunks=(100, 50))
y = DataArray(data=y, dims=("lat", "lon"), name="y")
y.encoding["chunksizes"] = (100, 50)
y.encoding["original_shape"] = (100, 100)
# Put them both into the same dataset
ds = Dataset({"x": x, "y": y})
with self.roundtrip(ds) as actual:
assert actual["x"].encoding["chunksizes"] == (50, 100)
assert actual["y"].encoding["chunksizes"] == (100, 50)
@requires_zarr
class ZarrBase(CFEncodedBase):
DIMENSION_KEY = "_ARRAY_DIMENSIONS"
@contextlib.contextmanager
def create_store(self):
with self.create_zarr_target() as store_target:
yield backends.ZarrStore.open_group(store_target, mode="w")
def save(self, dataset, store_target, **kwargs):
return dataset.to_zarr(store=store_target, **kwargs)
@contextlib.contextmanager
def open(self, store_target, **kwargs):
with xr.open_zarr(store_target, **kwargs) as ds:
yield ds
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
with self.create_zarr_target() as store_target:
self.save(data, store_target, **save_kwargs)
with self.open(store_target, **open_kwargs) as ds:
yield ds
def test_roundtrip_consolidated(self):
pytest.importorskip("zarr", minversion="2.2.1.dev2")
expected = create_test_data()
with self.roundtrip(
expected,
save_kwargs={"consolidated": True},
open_kwargs={"consolidated": True},
) as actual:
self.check_dtypes_roundtripped(expected, actual)
assert_identical(expected, actual)
@requires_dask
def test_auto_chunk(self):
original = create_test_data().chunk()
with self.roundtrip(original, open_kwargs={"chunks": None}) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# there should be no chunks
assert v.chunks is None
with self.roundtrip(original, open_kwargs={"chunks": "auto"}) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# chunk size should be the same as original
assert v.chunks == original[k].chunks
@requires_dask
@pytest.mark.filterwarnings("ignore:Specified Dask chunks")
def test_manual_chunk(self):
original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3})
# All of these should return non-chunked arrays
NO_CHUNKS = (None, 0, {})
for no_chunk in NO_CHUNKS:
open_kwargs = {"chunks": no_chunk}
with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# there should be no chunks
assert v.chunks is None
# uniform arrays
for i in range(2, 6):
rechunked = original.chunk(chunks=i)
open_kwargs = {"chunks": i}
with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# chunk size should be the same as rechunked
assert v.chunks == rechunked[k].chunks
chunks = {"dim1": 2, "dim2": 3, "dim3": 5}
rechunked = original.chunk(chunks=chunks)
open_kwargs = {"chunks": chunks, "overwrite_encoded_chunks": True}
with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
for k, v in actual.variables.items():
assert v.chunks == rechunked[k].chunks
with self.roundtrip(actual) as auto:
# encoding should have changed
for k, v in actual.variables.items():
assert v.chunks == rechunked[k].chunks
assert_identical(actual, auto)
assert_identical(actual.load(), auto.load())
@requires_dask
def test_warning_on_bad_chunks(self):
original = create_test_data().chunk({"dim1": 4, "dim2": 3, "dim3": 5})
bad_chunks = (2, {"dim2": (3, 3, 2, 1)})
for chunks in bad_chunks:
kwargs = {"chunks": chunks}
with pytest.warns(UserWarning):
with self.roundtrip(original, open_kwargs=kwargs) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
good_chunks = ({"dim2": 3}, {"dim3": 10})
for chunks in good_chunks:
kwargs = {"chunks": chunks}
with pytest.warns(None) as record:
with self.roundtrip(original, open_kwargs=kwargs) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
assert len(record) == 0
@requires_dask
def test_deprecate_auto_chunk(self):
original = create_test_data().chunk()
with pytest.warns(FutureWarning):
with self.roundtrip(original, open_kwargs={"auto_chunk": True}) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# chunk size should be the same as original
assert v.chunks == original[k].chunks
with pytest.warns(FutureWarning):
with self.roundtrip(original, open_kwargs={"auto_chunk": False}) as actual:
for k, v in actual.variables.items():
# only index variables should be in memory
assert v._in_memory == (k in actual.dims)
# there should be no chunks
assert v.chunks is None
@requires_dask
def test_write_uneven_dask_chunks(self):
# regression for GH#2225
original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3})
with self.roundtrip(original, open_kwargs={"chunks": "auto"}) as actual:
for k, v in actual.data_vars.items():
print(k)
assert v.chunks == actual[k].chunks
def test_chunk_encoding(self):
# These datasets have no dask chunks. All chunking specified in
# encoding
data = create_test_data()
chunks = (5, 5)
data["var2"].encoding.update({"chunks": chunks})
with self.roundtrip(data) as actual:
assert chunks == actual["var2"].encoding["chunks"]
# expect an error with non-integer chunks
data["var2"].encoding.update({"chunks": (5, 4.5)})
with pytest.raises(TypeError):
with self.roundtrip(data) as actual:
pass
@requires_dask
def test_chunk_encoding_with_dask(self):
# These datasets DO have dask chunks. Need to check for various
# interactions between dask and zarr chunks
ds = xr.DataArray((np.arange(12)), dims="x", name="var1").to_dataset()
# - no encoding specified -
# zarr automatically gets chunk information from dask chunks
ds_chunk4 = ds.chunk({"x": 4})
with self.roundtrip(ds_chunk4) as actual:
assert (4,) == actual["var1"].encoding["chunks"]
# should fail if dask_chunks are irregular...
ds_chunk_irreg = ds.chunk({"x": (5, 4, 3)})
with raises_regex(ValueError, "uniform chunk sizes."):
with self.roundtrip(ds_chunk_irreg) as actual:
pass
# should fail if encoding["chunks"] clashes with dask_chunks
badenc = ds.chunk({"x": 4})
badenc.var1.encoding["chunks"] = (6,)
with raises_regex(NotImplementedError, "named 'var1' would overlap"):
with self.roundtrip(badenc) as actual:
pass
badenc.var1.encoding["chunks"] = (2,)
with raises_regex(ValueError, "Specified Zarr chunk encoding"):
with self.roundtrip(badenc) as actual:
pass
badenc = badenc.chunk({"x": (3, 3, 6)})
badenc.var1.encoding["chunks"] = (3,)
with raises_regex(ValueError, "incompatible with this encoding"):
with self.roundtrip(badenc) as actual:
pass
# ... except if the last chunk is smaller than the first
ds_chunk_irreg = ds.chunk({"x": (5, 5, 2)})
with self.roundtrip(ds_chunk_irreg) as actual:
assert (5,) == actual["var1"].encoding["chunks"]
# re-save Zarr arrays
with self.roundtrip(ds_chunk_irreg) as original:
with self.roundtrip(original) as actual:
assert_identical(original, actual)
# - encoding specified -
# specify compatible encodings
for chunk_enc in 4, (4,):
ds_chunk4["var1"].encoding.update({"chunks": chunk_enc})
with self.roundtrip(ds_chunk4) as actual:
assert (4,) == actual["var1"].encoding["chunks"]
        # TODO: remove this failure once synchronized overlapping writes are
        # supported by xarray
ds_chunk4["var1"].encoding.update({"chunks": 5})
with pytest.raises(NotImplementedError):
with self.roundtrip(ds_chunk4) as actual:
pass
def test_hidden_zarr_keys(self):
expected = create_test_data()
with self.create_store() as store:
expected.dump_to_store(store)
zarr_group = store.ds
# check that a variable hidden attribute is present and correct
# JSON only has a single array type, which maps to list in Python.
# In contrast, dims in xarray is always a tuple.
for var in expected.variables.keys():
dims = zarr_group[var].attrs[self.DIMENSION_KEY]
assert dims == list(expected[var].dims)
with xr.decode_cf(store):
# make sure it is hidden
for var in expected.variables.keys():
assert self.DIMENSION_KEY not in expected[var].attrs
# put it back and try removing from a variable
del zarr_group.var2.attrs[self.DIMENSION_KEY]
with pytest.raises(KeyError):
with xr.decode_cf(store):
pass
@pytest.mark.skipif(LooseVersion(dask_version) < "2.4", reason="dask GH5334")
@pytest.mark.parametrize("group", [None, "group1"])
def test_write_persistence_modes(self, group):
original = create_test_data()
# overwrite mode
with self.roundtrip(
original,
save_kwargs={"mode": "w", "group": group},
open_kwargs={"group": group},
) as actual:
assert_identical(original, actual)
# don't overwrite mode
with self.roundtrip(
original,
save_kwargs={"mode": "w-", "group": group},
open_kwargs={"group": group},
) as actual:
assert_identical(original, actual)
# make sure overwriting works as expected
with self.create_zarr_target() as store:
self.save(original, store)
# should overwrite with no error
self.save(original, store, mode="w", group=group)
with self.open(store, group=group) as actual:
assert_identical(original, actual)
with pytest.raises(ValueError):
self.save(original, store, mode="w-")
# check append mode for normal write
with self.roundtrip(
original,
save_kwargs={"mode": "a", "group": group},
open_kwargs={"group": group},
) as actual:
assert_identical(original, actual)
# check append mode for append write
ds, ds_to_append, _ = create_append_test_data()
with self.create_zarr_target() as store_target:
ds.to_zarr(store_target, mode="w", group=group)
ds_to_append.to_zarr(store_target, append_dim="time", group=group)
original = xr.concat([ds, ds_to_append], dim="time")
actual = xr.open_zarr(store_target, group=group)
assert_identical(original, actual)
def test_compressor_encoding(self):
original = create_test_data()
# specify a custom compressor
import zarr
blosc_comp = zarr.Blosc(cname="zstd", clevel=3, shuffle=2)
save_kwargs = dict(encoding={"var1": {"compressor": blosc_comp}})
with self.roundtrip(original, save_kwargs=save_kwargs) as ds:
actual = ds["var1"].encoding["compressor"]
# get_config returns a dictionary of compressor attributes
assert actual.get_config() == blosc_comp.get_config()
def test_group(self):
original = create_test_data()
group = "some/random/path"
with self.roundtrip(
original, save_kwargs={"group": group}, open_kwargs={"group": group}
) as actual:
assert_identical(original, actual)
def test_encoding_kwarg_fixed_width_string(self):
# not relevant for zarr, since we don't use EncodedStringCoder
pass
    # TODO: someone who understands caching should figure out whether caching
    # makes sense for the Zarr backend
@pytest.mark.xfail(reason="Zarr caching not implemented")
def test_dataset_caching(self):
super().test_dataset_caching()
@pytest.mark.skipif(LooseVersion(dask_version) < "2.4", reason="dask GH5334")
def test_append_write(self):
super().test_append_write()
def test_append_with_invalid_dim_raises(self):
ds, ds_to_append, _ = create_append_test_data()
with self.create_zarr_target() as store_target:
ds.to_zarr(store_target, mode="w")
with pytest.raises(
ValueError, match="does not match any existing dataset dimensions"
):
ds_to_append.to_zarr(store_target, append_dim="notvalid")
def test_append_with_no_dims_raises(self):
with self.create_zarr_target() as store_target:
Dataset({"foo": ("x", [1])}).to_zarr(store_target, mode="w")
with pytest.raises(ValueError, match="different dimension names"):
Dataset({"foo": ("y", [2])}).to_zarr(store_target, mode="a")
def test_append_with_append_dim_not_set_raises(self):
ds, ds_to_append, _ = create_append_test_data()
with self.create_zarr_target() as store_target:
ds.to_zarr(store_target, mode="w")
with pytest.raises(ValueError, match="different dimension sizes"):
ds_to_append.to_zarr(store_target, mode="a")
def test_append_with_mode_not_a_raises(self):
ds, ds_to_append, _ = create_append_test_data()
with self.create_zarr_target() as store_target:
ds.to_zarr(store_target, mode="w")
with pytest.raises(
ValueError, match="append_dim was set along with mode='w'"
):
ds_to_append.to_zarr(store_target, mode="w", append_dim="time")
def test_append_with_existing_encoding_raises(self):
ds, ds_to_append, _ = create_append_test_data()
with self.create_zarr_target() as store_target:
ds.to_zarr(store_target, mode="w")
with pytest.raises(ValueError, match="but encoding was provided"):
ds_to_append.to_zarr(
store_target,
append_dim="time",
encoding={"da": {"compressor": None}},
)
def test_check_encoding_is_consistent_after_append(self):
ds, ds_to_append, _ = create_append_test_data()
# check encoding consistency
with self.create_zarr_target() as store_target:
import zarr
compressor = zarr.Blosc()
encoding = {"da": {"compressor": compressor}}
ds.to_zarr(store_target, mode="w", encoding=encoding)
ds_to_append.to_zarr(store_target, append_dim="time")
actual_ds = xr.open_zarr(store_target)
actual_encoding = actual_ds["da"].encoding["compressor"]
assert actual_encoding.get_config() == compressor.get_config()
assert_identical(
xr.open_zarr(store_target).compute(),
xr.concat([ds, ds_to_append], dim="time"),
)
@pytest.mark.skipif(LooseVersion(dask_version) < "2.4", reason="dask GH5334")
def test_append_with_new_variable(self):
ds, ds_to_append, ds_with_new_var = create_append_test_data()
# check append mode for new variable
with self.create_zarr_target() as store_target:
xr.concat([ds, ds_to_append], dim="time").to_zarr(store_target, mode="w")
ds_with_new_var.to_zarr(store_target, mode="a")
combined = xr.concat([ds, ds_to_append], dim="time")
combined["new_var"] = ds_with_new_var["new_var"]
assert_identical(combined, xr.open_zarr(store_target))
@requires_dask
def test_to_zarr_compute_false_roundtrip(self):
from dask.delayed import Delayed
original = create_test_data().chunk()
with self.create_zarr_target() as store:
delayed_obj = self.save(original, store, compute=False)
assert isinstance(delayed_obj, Delayed)
# make sure target store has not been written to yet
with pytest.raises(AssertionError):
with self.open(store) as actual:
assert_identical(original, actual)
delayed_obj.compute()
with self.open(store) as actual:
assert_identical(original, actual)
@requires_dask
def test_to_zarr_append_compute_false_roundtrip(self):
from dask.delayed import Delayed
ds, ds_to_append, _ = create_append_test_data()
ds, ds_to_append = ds.chunk(), ds_to_append.chunk()
with pytest.warns(SerializationWarning):
with self.create_zarr_target() as store:
delayed_obj = self.save(ds, store, compute=False, mode="w")
assert isinstance(delayed_obj, Delayed)
with pytest.raises(AssertionError):
with self.open(store) as actual:
assert_identical(ds, actual)
delayed_obj.compute()
with self.open(store) as actual:
assert_identical(ds, actual)
delayed_obj = self.save(
ds_to_append, store, compute=False, append_dim="time"
)
assert isinstance(delayed_obj, Delayed)
with pytest.raises(AssertionError):
with self.open(store) as actual:
assert_identical(
xr.concat([ds, ds_to_append], dim="time"), actual
)
delayed_obj.compute()
with self.open(store) as actual:
assert_identical(xr.concat([ds, ds_to_append], dim="time"), actual)
@requires_dask
def test_encoding_chunksizes(self):
# regression test for GH2278
# see also test_encoding_chunksizes_unlimited
nx, ny, nt = 4, 4, 5
original = xr.Dataset(
{}, coords={"x": np.arange(nx), "y": np.arange(ny), "t": np.arange(nt)}
)
original["v"] = xr.Variable(("x", "y", "t"), np.zeros((nx, ny, nt)))
original = original.chunk({"t": 1, "x": 2, "y": 2})
with self.roundtrip(original) as ds1:
assert_equal(ds1, original)
with self.roundtrip(ds1.isel(t=0)) as ds2:
assert_equal(ds2, original.isel(t=0))
@requires_zarr
class TestZarrDictStore(ZarrBase):
@contextlib.contextmanager
def create_zarr_target(self):
yield {}
@requires_zarr
class TestZarrDirectoryStore(ZarrBase):
@contextlib.contextmanager
def create_zarr_target(self):
with create_tmp_file(suffix=".zarr") as tmp:
yield tmp
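# Illustrative sketch (not collected by pytest; assumes zarr is installed): the
# two classes above differ only in the store target handed to
# ``to_zarr``/``open_zarr`` -- an in-memory dict acting as a MutableMapping
# store versus a ".zarr" directory on disk -- so the same round trip works with
# either target.
def _zarr_store_target_sketch(store):
    # ``store`` may be a plain dict (in-memory) or a path to a ".zarr" directory
    ds = Dataset({"foo": ("x", [1, 2, 3])})
    ds.to_zarr(store, mode="w")
    return xr.open_zarr(store)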
@requires_scipy
class TestScipyInMemoryData(CFEncodedBase, NetCDF3Only):
engine = "scipy"
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, "w")
def test_to_netcdf_explicit_engine(self):
# regression test for GH1321
Dataset({"foo": 42}).to_netcdf(engine="scipy")
def test_bytes_pickle(self):
data = Dataset({"foo": ("x", [1, 2, 3])})
fobj = data.to_netcdf()
with self.open(fobj) as ds:
unpickled = pickle.loads(pickle.dumps(ds))
assert_identical(unpickled, data)
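# Illustrative sketch (assumes scipy is installed): calling ``to_netcdf`` with
# no target returns the file contents as in-memory bytes, which is what
# ``test_bytes_pickle`` above relies on; the bytes can be reopened via BytesIO.
def _in_memory_netcdf_sketch():
    data = Dataset({"foo": ("x", [1, 2, 3])})
    payload = data.to_netcdf()  # bytes; nothing is written to disk
    with open_dataset(BytesIO(payload)) as ds:
        assert_identical(data, ds)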
@requires_scipy
class TestScipyFileObject(CFEncodedBase, NetCDF3Only):
engine = "scipy"
@contextlib.contextmanager
def create_store(self):
fobj = BytesIO()
yield backends.ScipyDataStore(fobj, "w")
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
with create_tmp_file() as tmp_file:
with open(tmp_file, "wb") as f:
self.save(data, f, **save_kwargs)
with open(tmp_file, "rb") as f:
with self.open(f, **open_kwargs) as ds:
yield ds
@pytest.mark.skip(reason="cannot pickle file objects")
def test_pickle(self):
pass
@pytest.mark.skip(reason="cannot pickle file objects")
def test_pickle_dataarray(self):
pass
@requires_scipy
class TestScipyFilePath(CFEncodedBase, NetCDF3Only):
engine = "scipy"
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.ScipyDataStore(tmp_file, mode="w") as store:
yield store
def test_array_attrs(self):
ds = Dataset(attrs={"foo": [[1, 2], [3, 4]]})
with raises_regex(ValueError, "must be 1-dimensional"):
with self.roundtrip(ds):
pass
def test_roundtrip_example_1_netcdf_gz(self):
with open_example_dataset("example_1.nc.gz") as expected:
with open_example_dataset("example_1.nc") as actual:
assert_identical(expected, actual)
def test_netcdf3_endianness(self):
# regression test for GH416
with open_example_dataset("bears.nc", engine="scipy") as expected:
for var in expected.variables.values():
assert var.dtype.isnative
@requires_netCDF4
def test_nc4_scipy(self):
with create_tmp_file(allow_cleanup_failure=True) as tmp_file:
with nc4.Dataset(tmp_file, "w", format="NETCDF4") as rootgrp:
rootgrp.createGroup("foo")
with raises_regex(TypeError, "pip install netcdf4"):
open_dataset(tmp_file, engine="scipy")
@requires_netCDF4
class TestNetCDF3ViaNetCDF4Data(CFEncodedBase, NetCDF3Only):
engine = "netcdf4"
file_format = "NETCDF3_CLASSIC"
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(
tmp_file, mode="w", format="NETCDF3_CLASSIC"
) as store:
yield store
def test_encoding_kwarg_vlen_string(self):
original = Dataset({"x": ["foo", "bar", "baz"]})
kwargs = dict(encoding={"x": {"dtype": str}})
with raises_regex(ValueError, "encoding dtype=str for vlen"):
with self.roundtrip(original, save_kwargs=kwargs):
pass
@requires_netCDF4
class TestNetCDF4ClassicViaNetCDF4Data(CFEncodedBase, NetCDF3Only):
engine = "netcdf4"
file_format = "NETCDF4_CLASSIC"
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(
tmp_file, mode="w", format="NETCDF4_CLASSIC"
) as store:
yield store
@requires_scipy_or_netCDF4
class TestGenericNetCDFData(CFEncodedBase, NetCDF3Only):
# verify that we can read and write netCDF3 files as long as we have scipy
# or netCDF4-python installed
file_format = "netcdf3_64bit"
def test_write_store(self):
# there's no specific store to test here
pass
@requires_scipy
def test_engine(self):
data = create_test_data()
with raises_regex(ValueError, "unrecognized engine"):
data.to_netcdf("foo.nc", engine="foobar")
with raises_regex(ValueError, "invalid engine"):
data.to_netcdf(engine="netcdf4")
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file)
with raises_regex(ValueError, "unrecognized engine"):
open_dataset(tmp_file, engine="foobar")
netcdf_bytes = data.to_netcdf()
with raises_regex(ValueError, "unrecognized engine"):
open_dataset(BytesIO(netcdf_bytes), engine="foobar")
def test_cross_engine_read_write_netcdf3(self):
data = create_test_data()
valid_engines = set()
if has_netCDF4:
valid_engines.add("netcdf4")
if has_scipy:
valid_engines.add("scipy")
for write_engine in valid_engines:
for format in self.netcdf3_formats:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, format=format, engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file, engine=read_engine) as actual:
                            # hack to make this test work: the coord comes back
                            # as a DataArray rather than a coord, so we loop
                            # over the variables here instead of delegating to
                            # the shared test helper (otherwise we would recurse)
[
assert_allclose(data[k].variable, actual[k].variable)
for k in data.variables
]
def test_encoding_unlimited_dims(self):
ds = Dataset({"x": ("y", np.arange(10.0))})
with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
# Regression test for https://github.com/pydata/xarray/issues/2134
with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
ds.encoding = {"unlimited_dims": ["y"]}
with self.roundtrip(ds) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
# Regression test for https://github.com/pydata/xarray/issues/2134
ds.encoding = {"unlimited_dims": "y"}
with self.roundtrip(ds) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
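# Note (descriptive comment): test_encoding_unlimited_dims above exercises all
# four spellings -- ``unlimited_dims`` given as a list or as a bare string, and
# passed either through the save kwargs or through ``ds.encoding`` -- and every
# variant should round-trip to the same ``{"y"}``-valued encoding on the
# reopened dataset.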
@requires_h5netcdf
@requires_netCDF4
@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
class TestH5NetCDFData(NetCDF4Base):
engine = "h5netcdf"
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
yield backends.H5NetCDFStore.open(tmp_file, "w")
@pytest.mark.filterwarnings("ignore:complex dtypes are supported by h5py")
@pytest.mark.parametrize(
"invalid_netcdf, warntype, num_warns",
[(None, FutureWarning, 1), (False, FutureWarning, 1), (True, None, 0)],
)
def test_complex(self, invalid_netcdf, warntype, num_warns):
expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
save_kwargs = {"invalid_netcdf": invalid_netcdf}
with pytest.warns(warntype) as record:
with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
assert_equal(expected, actual)
recorded_num_warns = 0
if warntype:
for warning in record:
if issubclass(warning.category, warntype) and (
"complex dtypes" in str(warning.message)
):
recorded_num_warns += 1
assert recorded_num_warns == num_warns
def test_cross_engine_read_write_netcdf4(self):
        # Drop dim3, because its labels include strings. These are not read
        # back properly with python-netCDF4, which converts them into unicode
        # instead of leaving them as bytes.
data = create_test_data().drop_vars("dim3")
data.attrs["foo"] = "bar"
valid_engines = ["netcdf4", "h5netcdf"]
for write_engine in valid_engines:
with create_tmp_file() as tmp_file:
data.to_netcdf(tmp_file, engine=write_engine)
for read_engine in valid_engines:
with open_dataset(tmp_file, engine=read_engine) as actual:
assert_identical(data, actual)
def test_read_byte_attrs_as_unicode(self):
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, "w") as nc:
nc.foo = b"bar"
with open_dataset(tmp_file) as actual:
expected = Dataset(attrs={"foo": "bar"})
assert_identical(expected, actual)
def test_encoding_unlimited_dims(self):
ds = Dataset({"x": ("y", np.arange(10.0))})
with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
ds.encoding = {"unlimited_dims": ["y"]}
with self.roundtrip(ds) as actual:
assert actual.encoding["unlimited_dims"] == set("y")
assert_equal(ds, actual)
def test_compression_encoding_h5py(self):
ENCODINGS = (
# h5py style compression with gzip codec will be converted to
# NetCDF4-Python style on round-trip
(
{"compression": "gzip", "compression_opts": 9},
{"zlib": True, "complevel": 9},
),
# What can't be expressed in NetCDF4-Python style is
# round-tripped unaltered
(
{"compression": "lzf", "compression_opts": None},
{"compression": "lzf", "compression_opts": None},
),
# If both styles are used together, h5py format takes precedence
(
{
"compression": "lzf",
"compression_opts": None,
"zlib": True,
"complevel": 9,
},
{"compression": "lzf", "compression_opts": None},
),
)
for compr_in, compr_out in ENCODINGS:
data = create_test_data()
compr_common = {
"chunksizes": (5, 5),
"fletcher32": True,
"shuffle": True,
"original_shape": data.var2.shape,
}
data["var2"].encoding.update(compr_in)
data["var2"].encoding.update(compr_common)
compr_out.update(compr_common)
data["scalar"] = ("scalar_dim", np.array([2.0]))
data["scalar"] = data["scalar"][0]
with self.roundtrip(data) as actual:
for k, v in compr_out.items():
assert v == actual["var2"].encoding[k]
def test_compression_check_encoding_h5py(self):
"""When mismatched h5py and NetCDF4-Python encodings are expressed
in to_netcdf(encoding=...), must raise ValueError
"""
data = Dataset({"x": ("y", np.arange(10.0))})
        # Compatible encodings are accepted gracefully
with create_tmp_file() as tmp_file:
data.to_netcdf(
tmp_file,
engine="h5netcdf",
encoding={
"x": {
"compression": "gzip",
"zlib": True,
"compression_opts": 6,
"complevel": 6,
}
},
)
with open_dataset(tmp_file, engine="h5netcdf") as actual:
assert actual.x.encoding["zlib"] is True
assert actual.x.encoding["complevel"] == 6
        # Incompatible encodings raise a ValueError
with create_tmp_file() as tmp_file:
with raises_regex(
ValueError, "'zlib' and 'compression' encodings mismatch"
):
data.to_netcdf(
tmp_file,
engine="h5netcdf",
encoding={"x": {"compression": "lzf", "zlib": True}},
)
with create_tmp_file() as tmp_file:
with raises_regex(
ValueError, "'complevel' and 'compression_opts' encodings mismatch"
):
data.to_netcdf(
tmp_file,
engine="h5netcdf",
encoding={
"x": {
"compression": "gzip",
"compression_opts": 5,
"complevel": 6,
}
},
)
def test_dump_encodings_h5py(self):
# regression test for #709
ds = Dataset({"x": ("y", np.arange(10.0))})
kwargs = {"encoding": {"x": {"compression": "gzip", "compression_opts": 9}}}
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual.x.encoding["zlib"]
assert actual.x.encoding["complevel"] == 9
kwargs = {"encoding": {"x": {"compression": "lzf", "compression_opts": None}}}
with self.roundtrip(ds, save_kwargs=kwargs) as actual:
assert actual.x.encoding["compression"] == "lzf"
assert actual.x.encoding["compression_opts"] is None
def test_already_open_dataset_group(self):
import h5netcdf
with create_tmp_file() as tmp_file:
with nc4.Dataset(tmp_file, mode="w") as nc:
group = nc.createGroup("g")
v = group.createVariable("x", "int")
v[...] = 42
h5 = h5netcdf.File(tmp_file, mode="r")
store = backends.H5NetCDFStore(h5["g"])
with open_dataset(store) as ds:
expected = Dataset({"x": ((), 42)})
assert_identical(expected, ds)
h5 = h5netcdf.File(tmp_file, mode="r")
store = backends.H5NetCDFStore(h5, group="g")
with open_dataset(store) as ds:
expected = Dataset({"x": ((), 42)})
assert_identical(expected, ds)
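# Summary of the conversions exercised in test_compression_encoding_h5py above
# (descriptive comment): h5py-style gzip settings are rewritten into their
# netCDF4-python equivalents on round trip, settings with no netCDF4-python
# counterpart (e.g. LZF) pass through unchanged, and when both styles are given
# the h5py-style keys take precedence:
#
#   {"compression": "gzip", "compression_opts": 9}   -> {"zlib": True, "complevel": 9}
#   {"compression": "lzf", "compression_opts": None} -> unchanged
#   both styles together                             -> h5py-style keys win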
@requires_h5netcdf
class TestH5NetCDFFileObject(TestH5NetCDFData):
engine = "h5netcdf"
def test_open_badbytes(self):
with raises_regex(ValueError, "HDF5 as bytes"):
with open_dataset(b"\211HDF\r\n\032\n", engine="h5netcdf"):
pass
with raises_regex(ValueError, "not a valid netCDF"):
with open_dataset(b"garbage"):
pass
with raises_regex(ValueError, "can only read bytes"):
with open_dataset(b"garbage", engine="netcdf4"):
pass
with raises_regex(ValueError, "not a valid netCDF"):
with open_dataset(BytesIO(b"garbage"), engine="h5netcdf"):
pass
def test_open_twice(self):
expected = create_test_data()
expected.attrs["foo"] = "bar"
with raises_regex(ValueError, "read/write pointer not at zero"):
with create_tmp_file() as tmp_file:
expected.to_netcdf(tmp_file, engine="h5netcdf")
with open(tmp_file, "rb") as f:
with open_dataset(f, engine="h5netcdf"):
with open_dataset(f, engine="h5netcdf"):
pass
def test_open_fileobj(self):
# open in-memory datasets instead of local file paths
expected = create_test_data().drop_vars("dim3")
expected.attrs["foo"] = "bar"
with create_tmp_file() as tmp_file:
expected.to_netcdf(tmp_file, engine="h5netcdf")
with open(tmp_file, "rb") as f:
with open_dataset(f, engine="h5netcdf") as actual:
assert_identical(expected, actual)
f.seek(0)
with BytesIO(f.read()) as bio:
with open_dataset(bio, engine="h5netcdf") as actual:
assert_identical(expected, actual)
@requires_h5netcdf
@requires_dask
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
class TestH5NetCDFViaDaskData(TestH5NetCDFData):
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
open_kwargs.setdefault("chunks", -1)
with TestH5NetCDFData.roundtrip(
self, data, save_kwargs, open_kwargs, allow_cleanup_failure
) as ds:
yield ds
def test_dataset_caching(self):
# caching behavior differs for dask
pass
def test_write_inconsistent_chunks(self):
# Construct two variables with the same dimensions, but different
# chunk sizes.
x = da.zeros((100, 100), dtype="f4", chunks=(50, 100))
x = DataArray(data=x, dims=("lat", "lon"), name="x")
x.encoding["chunksizes"] = (50, 100)
x.encoding["original_shape"] = (100, 100)
y = da.ones((100, 100), dtype="f4", chunks=(100, 50))
y = DataArray(data=y, dims=("lat", "lon"), name="y")
y.encoding["chunksizes"] = (100, 50)
y.encoding["original_shape"] = (100, 100)
# Put them both into the same dataset
ds = Dataset({"x": x, "y": y})
with self.roundtrip(ds) as actual:
assert actual["x"].encoding["chunksizes"] == (50, 100)
assert actual["y"].encoding["chunksizes"] == (100, 50)
@pytest.fixture(params=["scipy", "netcdf4", "h5netcdf", "pynio"])
def readengine(request):
return request.param
@pytest.fixture(params=[1, 20])
def nfiles(request):
return request.param
@pytest.fixture(params=[5, None])
def file_cache_maxsize(request):
maxsize = request.param
if maxsize is not None:
with set_options(file_cache_maxsize=maxsize):
yield maxsize
else:
yield maxsize
@pytest.fixture(params=[True, False])
def parallel(request):
return request.param
@pytest.fixture(params=[None, 5])
def chunks(request):
return request.param
# using pytest.mark.skipif does not work here, so this is a workaround
def skip_if_not_engine(engine):
if engine == "netcdf4":
pytest.importorskip("netCDF4")
elif engine == "pynio":
pytest.importorskip("Nio")
else:
pytest.importorskip(engine)
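# Note (descriptive comment): the fixtures above parametrize
# test_open_mfdataset_manyfiles below over every combination of read engine,
# number of files, file cache size, parallel flag and chunking -- 4 x 2 x 2 x 2
# x 2 = 64 parametrizations before any engine-specific skips.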
@requires_dask
@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
def test_open_mfdataset_manyfiles(
readengine, nfiles, parallel, chunks, file_cache_maxsize
):
# skip certain combinations
skip_if_not_engine(readengine)
if ON_WINDOWS:
pytest.skip("Skipping on Windows")
randdata = np.random.randn(nfiles)
original = Dataset({"foo": ("x", randdata)})
# test standard open_mfdataset approach with too many files
with create_tmp_files(nfiles) as tmpfiles:
writeengine = readengine if readengine != "pynio" else "netcdf4"
# split into multiple sets of temp files
for ii in original.x.values:
subds = original.isel(x=slice(ii, ii + 1))
subds.to_netcdf(tmpfiles[ii], engine=writeengine)
# check that calculation on opened datasets works properly
with open_mfdataset(
tmpfiles,
combine="nested",
concat_dim="x",
engine=readengine,
parallel=parallel,
chunks=chunks,
) as actual:
# check that using open_mfdataset returns dask arrays for variables
assert isinstance(actual["foo"].data, dask_array_type)
assert_identical(original, actual)
@requires_netCDF4
@requires_dask
def test_open_mfdataset_list_attr():
"""
Case when an attribute of type list differs across the multiple files
"""
from netCDF4 import Dataset
with create_tmp_files(2) as nfiles:
for i in range(2):
f = Dataset(nfiles[i], "w")
f.createDimension("x", 3)
vlvar = f.createVariable("test_var", np.int32, ("x"))
# here create an attribute as a list
vlvar.test_attr = [f"string a {i}", f"string b {i}"]
vlvar[:] = np.arange(3)
f.close()
ds1 = open_dataset(nfiles[0])
ds2 = open_dataset(nfiles[1])
original = xr.concat([ds1, ds2], dim="x")
with xr.open_mfdataset(
[nfiles[0], nfiles[1]], combine="nested", concat_dim="x"
) as actual:
assert_identical(actual, original)
@requires_scipy_or_netCDF4
@requires_dask
class TestOpenMFDatasetWithDataVarsAndCoordsKw:
coord_name = "lon"
var_name = "v1"
@contextlib.contextmanager
def setup_files_and_datasets(self, fuzz=0):
ds1, ds2 = self.gen_datasets_with_common_coord_and_time()
# to test join='exact'
ds1["x"] = ds1.x + fuzz
with create_tmp_file() as tmpfile1:
with create_tmp_file() as tmpfile2:
# save data to the temporary files
ds1.to_netcdf(tmpfile1)
ds2.to_netcdf(tmpfile2)
yield [tmpfile1, tmpfile2], [ds1, ds2]
def gen_datasets_with_common_coord_and_time(self):
# create coordinate data
nx = 10
nt = 10
x = np.arange(nx)
t1 = np.arange(nt)
t2 = np.arange(nt, 2 * nt, 1)
v1 = np.random.randn(nt, nx)
v2 = np.random.randn(nt, nx)
ds1 = Dataset(
data_vars={self.var_name: (["t", "x"], v1), self.coord_name: ("x", 2 * x)},
coords={"t": (["t"], t1), "x": (["x"], x)},
)
ds2 = Dataset(
data_vars={self.var_name: (["t", "x"], v2), self.coord_name: ("x", 2 * x)},
coords={"t": (["t"], t2), "x": (["x"], x)},
)
return ds1, ds2
@pytest.mark.parametrize("combine", ["nested", "by_coords"])
@pytest.mark.parametrize("opt", ["all", "minimal", "different"])
@pytest.mark.parametrize("join", ["outer", "inner", "left", "right"])
def test_open_mfdataset_does_same_as_concat(self, combine, opt, join):
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
if combine == "by_coords":
files.reverse()
with open_mfdataset(
files, data_vars=opt, combine=combine, concat_dim="t", join=join
) as ds:
ds_expect = xr.concat([ds1, ds2], data_vars=opt, dim="t", join=join)
assert_identical(ds, ds_expect)
def test_open_mfdataset_dataset_attr_by_coords(self):
"""
Case when an attribute differs across the multiple files
"""
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
# Give the files an inconsistent attribute
for i, f in enumerate(files):
ds = open_dataset(f).load()
ds.attrs["test_dataset_attr"] = 10 + i
ds.close()
ds.to_netcdf(f)
with xr.open_mfdataset(files, combine="by_coords", concat_dim="t") as ds:
assert ds.test_dataset_attr == 10
def test_open_mfdataset_dataarray_attr_by_coords(self):
"""
Case when an attribute of a member DataArray differs across the multiple files
"""
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
# Give the files an inconsistent attribute
for i, f in enumerate(files):
ds = open_dataset(f).load()
ds["v1"].attrs["test_dataarray_attr"] = i
ds.close()
ds.to_netcdf(f)
with xr.open_mfdataset(files, combine="by_coords", concat_dim="t") as ds:
assert ds["v1"].test_dataarray_attr == 0
@pytest.mark.parametrize("combine", ["nested", "by_coords"])
@pytest.mark.parametrize("opt", ["all", "minimal", "different"])
def test_open_mfdataset_exact_join_raises_error(self, combine, opt):
with self.setup_files_and_datasets(fuzz=0.1) as (files, [ds1, ds2]):
if combine == "by_coords":
files.reverse()
with raises_regex(ValueError, "indexes along dimension"):
open_mfdataset(
files, data_vars=opt, combine=combine, concat_dim="t", join="exact"
)
def test_common_coord_when_datavars_all(self):
opt = "all"
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
# open the files with the data_var option
with open_mfdataset(
files, data_vars=opt, combine="nested", concat_dim="t"
) as ds:
coord_shape = ds[self.coord_name].shape
coord_shape1 = ds1[self.coord_name].shape
coord_shape2 = ds2[self.coord_name].shape
var_shape = ds[self.var_name].shape
assert var_shape == coord_shape
assert coord_shape1 != coord_shape
assert coord_shape2 != coord_shape
def test_common_coord_when_datavars_minimal(self):
opt = "minimal"
with self.setup_files_and_datasets() as (files, [ds1, ds2]):
# open the files using data_vars option
with open_mfdataset(
files, data_vars=opt, combine="nested", concat_dim="t"
) as ds:
coord_shape = ds[self.coord_name].shape
coord_shape1 = ds1[self.coord_name].shape
coord_shape2 = ds2[self.coord_name].shape
var_shape = ds[self.var_name].shape
assert var_shape != coord_shape
assert coord_shape1 == coord_shape
assert coord_shape2 == coord_shape
def test_invalid_data_vars_value_should_fail(self):
with self.setup_files_and_datasets() as (files, _):
with pytest.raises(ValueError):
with open_mfdataset(files, data_vars="minimum", combine="by_coords"):
pass
# test invalid coord parameter
with pytest.raises(ValueError):
with open_mfdataset(files, coords="minimum", combine="by_coords"):
pass
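# Note (descriptive comment, restating the concat/open_mfdataset semantics the
# tests above rely on): data_vars="all" concatenates every data variable along
# the concat dimension, "minimal" only those that already contain it, and
# "different" only those that differ between datasets -- which is why the
# shared coordinate gains a "t" dimension under "all" but keeps its original
# shape under "minimal".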
@requires_dask
@requires_scipy
@requires_netCDF4
class TestDask(DatasetIOBase):
@contextlib.contextmanager
def create_store(self):
yield Dataset()
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
yield data.chunk()
# Override methods in DatasetIOBase - not applicable to dask
def test_roundtrip_string_encoded_characters(self):
pass
def test_roundtrip_coordinates_with_space(self):
pass
def test_roundtrip_numpy_datetime_data(self):
# Override method in DatasetIOBase - remove not applicable
# save_kwargs
times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"])
expected = Dataset({"t": ("t", times), "t0": times[0]})
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_roundtrip_cftime_datetime_data(self):
# Override method in DatasetIOBase - remove not applicable
# save_kwargs
from .test_coding_times import _all_cftime_date_types
date_types = _all_cftime_date_types()
for date_type in date_types.values():
times = [date_type(1, 1, 1), date_type(1, 1, 2)]
expected = Dataset({"t": ("t", times), "t0": times[0]})
expected_decoded_t = np.array(times)
expected_decoded_t0 = np.array([date_type(1, 1, 1)])
with self.roundtrip(expected) as actual:
abs_diff = abs(actual.t.values - expected_decoded_t)
assert (abs_diff <= np.timedelta64(1, "s")).all()
abs_diff = abs(actual.t0.values - expected_decoded_t0)
assert (abs_diff <= np.timedelta64(1, "s")).all()
def test_write_store(self):
# Override method in DatasetIOBase - not applicable to dask
pass
def test_dataset_caching(self):
expected = Dataset({"foo": ("x", [5, 6, 7])})
with self.roundtrip(expected) as actual:
assert not actual.foo.variable._in_memory
actual.foo.values # no caching
assert not actual.foo.variable._in_memory
def test_open_mfdataset(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert isinstance(actual.foo.variable.data, da.Array)
assert actual.foo.variable.data.chunks == ((5, 5),)
assert_identical(original, actual)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested", chunks={"x": 3}
) as actual:
assert actual.foo.variable.data.chunks == ((3, 2, 3, 2),)
with raises_regex(IOError, "no files to open"):
open_mfdataset("foo-bar-baz-*.nc")
with raises_regex(ValueError, "wild-card"):
open_mfdataset("http://some/remote/uri")
def test_open_mfdataset_2d(self):
original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
with create_tmp_file() as tmp3:
with create_tmp_file() as tmp4:
original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
with open_mfdataset(
[[tmp1, tmp2], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
) as actual:
assert isinstance(actual.foo.variable.data, da.Array)
assert actual.foo.variable.data.chunks == ((5, 5), (4, 4))
assert_identical(original, actual)
with open_mfdataset(
[[tmp1, tmp2], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
chunks={"x": 3, "y": 2},
) as actual:
assert actual.foo.variable.data.chunks == (
(3, 2, 3, 2),
(2, 2, 2, 2),
)
def test_open_mfdataset_pathlib(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
tmp1 = Path(tmp1)
tmp2 = Path(tmp2)
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_2d_pathlib(self):
original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
with create_tmp_file() as tmp3:
with create_tmp_file() as tmp4:
tmp1 = Path(tmp1)
tmp2 = Path(tmp2)
tmp3 = Path(tmp3)
tmp4 = Path(tmp4)
original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
with open_mfdataset(
[[tmp1, tmp2], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_2(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(original, actual)
def test_attrs_mfdataset(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs["test1"] = "foo"
ds2.attrs["test2"] = "bar"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
                    # presumes that attributes are inherited from the
                    # first dataset loaded
assert actual.test1 == ds1.test1
# attributes from ds2 are not retained, e.g.,
with raises_regex(AttributeError, "no attribute"):
actual.test2
def test_open_mfdataset_attrs_file(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_files(2) as (tmp1, tmp2):
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs["test1"] = "foo"
ds2.attrs["test2"] = "bar"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2
) as actual:
# attributes are inherited from the master file
assert actual.attrs["test2"] == ds2.attrs["test2"]
# attributes from ds1 are not retained, e.g.,
assert "test1" not in actual.attrs
def test_open_mfdataset_attrs_file_path(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_files(2) as (tmp1, tmp2):
tmp1 = Path(tmp1)
tmp2 = Path(tmp2)
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs["test1"] = "foo"
ds2.attrs["test2"] = "bar"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2
) as actual:
# attributes are inherited from the master file
assert actual.attrs["test2"] == ds2.attrs["test2"]
# attributes from ds1 are not retained, e.g.,
assert "test1" not in actual.attrs
def test_open_mfdataset_auto_combine(self):
original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp2, tmp1], combine="by_coords") as actual:
assert_identical(original, actual)
@pytest.mark.xfail(reason="mfdataset loses encoding currently.")
def test_encoding_mfdataset(self):
original = Dataset(
{
"foo": ("t", np.random.randn(10)),
"t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")),
}
)
original.t.encoding["units"] = "days since 2010-01-01"
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
ds1 = original.isel(t=slice(5))
ds2 = original.isel(t=slice(5, 10))
ds1.t.encoding["units"] = "days since 2010-01-01"
ds2.t.encoding["units"] = "days since 2000-01-01"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2], combine="nested") as actual:
assert actual.t.encoding["units"] == original.t.encoding["units"]
assert actual.t.encoding["units"] == ds1.t.encoding["units"]
assert actual.t.encoding["units"] != ds2.t.encoding["units"]
def test_preprocess_mfdataset(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
def preprocess(ds):
return ds.assign_coords(z=0)
expected = preprocess(original)
with open_mfdataset(
tmp, preprocess=preprocess, combine="by_coords"
) as actual:
assert_identical(expected, actual)
def test_save_mfdataset_roundtrip(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(actual, original)
def test_save_mfdataset_invalid(self):
ds = Dataset()
with raises_regex(ValueError, "cannot use mode"):
save_mfdataset([ds, ds], ["same", "same"])
with raises_regex(ValueError, "same length"):
save_mfdataset([ds, ds], ["only one path"])
def test_save_mfdataset_invalid_dataarray(self):
# regression test for GH1555
da = DataArray([1, 2])
with raises_regex(TypeError, "supports writing Dataset"):
save_mfdataset([da], ["dataarray"])
def test_save_mfdataset_pathlib_roundtrip(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
tmp1 = Path(tmp1)
tmp2 = Path(tmp2)
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(actual, original)
def test_open_and_do_math(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(tmp, combine="by_coords") as ds:
actual = 1.0 * ds
assert_allclose(original, actual, decode_bytes=False)
def test_open_mfdataset_concat_dim_none(self):
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
data = Dataset({"x": 0})
data.to_netcdf(tmp1)
Dataset({"x": np.nan}).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim=None, combine="nested"
) as actual:
assert_identical(data, actual)
def test_open_mfdataset_concat_dim_default_none(self):
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
data = Dataset({"x": 0})
data.to_netcdf(tmp1)
Dataset({"x": np.nan}).to_netcdf(tmp2)
with open_mfdataset([tmp1, tmp2], combine="nested") as actual:
assert_identical(data, actual)
def test_open_dataset(self):
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks={"x": 5}) as actual:
assert isinstance(actual.foo.variable.data, da.Array)
assert actual.foo.variable.data.chunks == ((5, 5),)
assert_identical(original, actual)
with open_dataset(tmp, chunks=5) as actual:
assert_identical(original, actual)
with open_dataset(tmp) as actual:
assert isinstance(actual.foo.variable.data, np.ndarray)
assert_identical(original, actual)
def test_open_single_dataset(self):
# Test for issue GH #1988. This makes sure that the
# concat_dim is utilized when specified in open_mfdataset().
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
dim = DataArray([100], name="baz", dims="baz")
expected = Dataset(
{"foo": (("baz", "x"), rnddata[np.newaxis, :])}, {"baz": [100]}
)
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset([tmp], concat_dim=dim, combine="nested") as actual:
assert_identical(expected, actual)
def test_open_multi_dataset(self):
        # Test for issues GH #1988 and #2647. This makes sure that the
        # concat_dim is utilized when specified in open_mfdataset().
        # The additional wrinkle is testing a concat_dim of length greater
        # than one: numpy's implicit casting of length-1 arrays to booleans
        # in tests allowed #2647 to slip past test_open_single_dataset(),
        # which is itself still needed as-is because the original bug caused
        # length-1 arrays to not be used correctly in concatenation.
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
dim = DataArray([100, 150], name="baz", dims="baz")
expected = Dataset(
{"foo": (("baz", "x"), np.tile(rnddata[np.newaxis, :], (2, 1)))},
{"baz": [100, 150]},
)
with create_tmp_file() as tmp1, create_tmp_file() as tmp2:
original.to_netcdf(tmp1)
original.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim=dim, combine="nested"
) as actual:
assert_identical(expected, actual)
def test_dask_roundtrip(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
chunks = {"dim1": 4, "dim2": 4, "dim3": 4, "time": 10}
with open_dataset(tmp, chunks=chunks) as dask_ds:
assert_identical(data, dask_ds)
with create_tmp_file() as tmp2:
dask_ds.to_netcdf(tmp2)
with open_dataset(tmp2) as on_disk:
assert_identical(data, on_disk)
def test_deterministic_names(self):
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
with open_mfdataset(tmp, combine="by_coords") as ds:
original_names = {k: v.data.name for k, v in ds.data_vars.items()}
with open_mfdataset(tmp, combine="by_coords") as ds:
repeat_names = {k: v.data.name for k, v in ds.data_vars.items()}
for var_name, dask_name in original_names.items():
assert var_name in dask_name
assert dask_name[:13] == "open_dataset-"
assert original_names == repeat_names
def test_dataarray_compute(self):
# Test DataArray.compute() on dask backend.
# The test for Dataset.compute() is already in DatasetIOBase;
# however dask is the only tested backend which supports DataArrays
actual = DataArray([1, 2]).chunk()
computed = actual.compute()
assert not actual._in_memory
assert computed._in_memory
assert_allclose(actual, computed, decode_bytes=False)
def test_save_mfdataset_compute_false_roundtrip(self):
from dask.delayed import Delayed
original = Dataset({"foo": ("x", np.random.randn(10))}).chunk()
datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp1:
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp2:
delayed_obj = save_mfdataset(
datasets, [tmp1, tmp2], engine=self.engine, compute=False
)
assert isinstance(delayed_obj, Delayed)
delayed_obj.compute()
with open_mfdataset(
[tmp1, tmp2], combine="nested", concat_dim="x"
) as actual:
assert_identical(actual, original)
def test_load_dataset(self):
with create_tmp_file() as tmp:
original = Dataset({"foo": ("x", np.random.randn(10))})
original.to_netcdf(tmp)
ds = load_dataset(tmp)
# this would fail if we used open_dataset instead of load_dataset
ds.to_netcdf(tmp)
def test_load_dataarray(self):
with create_tmp_file() as tmp:
original = Dataset({"foo": ("x", np.random.randn(10))})
original.to_netcdf(tmp)
ds = load_dataarray(tmp)
# this would fail if we used open_dataarray instead of
# load_dataarray
ds.to_netcdf(tmp)
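# Note (descriptive comment): unlike open_dataset/open_dataarray, the
# load_dataset/load_dataarray functions read everything eagerly and close the
# underlying file, which is why the two tests above can immediately write back
# to the same path; a lazily opened dataset could fail there because the source
# file would still be held open.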
@requires_scipy_or_netCDF4
@requires_pydap
@pytest.mark.filterwarnings("ignore:The binary mode of fromstring is deprecated")
class TestPydap:
def convert_to_pydap_dataset(self, original):
from pydap.model import GridType, BaseType, DatasetType
ds = DatasetType("bears", **original.attrs)
for key, var in original.data_vars.items():
v = GridType(key)
v[key] = BaseType(key, var.values, dimensions=var.dims, **var.attrs)
for d in var.dims:
v[d] = BaseType(d, var[d].values)
ds[key] = v
# check all dims are stored in ds
for d in original.coords:
ds[d] = BaseType(
d, original[d].values, dimensions=(d,), **original[d].attrs
)
return ds
@contextlib.contextmanager
def create_datasets(self, **kwargs):
with open_example_dataset("bears.nc") as expected:
pydap_ds = self.convert_to_pydap_dataset(expected)
actual = open_dataset(PydapDataStore(pydap_ds))
            # TODO: remove this workaround once no longer needed;
            # netCDF converts strings to bytes rather than unicode
expected["bears"] = expected["bears"].astype(str)
yield actual, expected
def test_cmp_local_file(self):
with self.create_datasets() as (actual, expected):
assert_equal(actual, expected)
# global attributes should be global attributes on the dataset
assert "NC_GLOBAL" not in actual.attrs
assert "history" in actual.attrs
            # we don't check attributes exactly with assert_identical()
            # because the test DAP server seems to insert some extra
            # attributes not found in the netCDF file.
assert actual.attrs.keys() == expected.attrs.keys()
with self.create_datasets() as (actual, expected):
assert_equal(actual[{"l": 2}], expected[{"l": 2}])
with self.create_datasets() as (actual, expected):
assert_equal(actual.isel(i=0, j=-1), expected.isel(i=0, j=-1))
with self.create_datasets() as (actual, expected):
assert_equal(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2)))
with self.create_datasets() as (actual, expected):
indexers = {"i": [1, 0, 0], "j": [1, 2, 0, 1]}
assert_equal(actual.isel(**indexers), expected.isel(**indexers))
with self.create_datasets() as (actual, expected):
indexers = {
"i": DataArray([0, 1, 0], dims="a"),
"j": DataArray([0, 2, 1], dims="a"),
}
assert_equal(actual.isel(**indexers), expected.isel(**indexers))
def test_compatible_to_netcdf(self):
# make sure it can be saved as a netcdf
with self.create_datasets() as (actual, expected):
with create_tmp_file() as tmp_file:
actual.to_netcdf(tmp_file)
with open_dataset(tmp_file) as actual2:
actual2["bears"] = actual2["bears"].astype(str)
assert_equal(actual2, expected)
@requires_dask
def test_dask(self):
with self.create_datasets(chunks={"j": 2}) as (actual, expected):
assert_equal(actual, expected)
@network
@requires_scipy_or_netCDF4
@requires_pydap
class TestPydapOnline(TestPydap):
@contextlib.contextmanager
def create_datasets(self, **kwargs):
url = "http://test.opendap.org/opendap/hyrax/data/nc/bears.nc"
actual = open_dataset(url, engine="pydap", **kwargs)
with open_example_dataset("bears.nc") as expected:
            # workaround to restore strings, which are converted to bytes
expected["bears"] = expected["bears"].astype(str)
yield actual, expected
def test_session(self):
from pydap.cas.urs import setup_session
session = setup_session("XarrayTestUser", "Xarray2017")
with mock.patch("pydap.client.open_url") as mock_func:
xr.backends.PydapDataStore.open("http://test.url", session=session)
mock_func.assert_called_with("http://test.url", session=session)
@requires_scipy
@requires_pynio
class TestPyNio(CFEncodedBase, NetCDF3Only):
def test_write_store(self):
# pynio is read-only for now
pass
@contextlib.contextmanager
def open(self, path, **kwargs):
with open_dataset(path, engine="pynio", **kwargs) as ds:
yield ds
def test_kwargs(self):
kwargs = {"format": "grib"}
path = os.path.join(os.path.dirname(__file__), "data", "example")
with backends.NioDataStore(path, **kwargs) as store:
assert store._manager._kwargs["format"] == "grib"
def save(self, dataset, path, **kwargs):
return dataset.to_netcdf(path, engine="scipy", **kwargs)
def test_weakrefs(self):
example = Dataset({"foo": ("x", np.arange(5.0))})
expected = example.rename({"foo": "bar", "x": "y"})
with create_tmp_file() as tmp_file:
example.to_netcdf(tmp_file, engine="scipy")
on_disk = open_dataset(tmp_file, engine="pynio")
actual = on_disk.rename({"foo": "bar", "x": "y"})
del on_disk # trigger garbage collection
assert_identical(actual, expected)
@requires_cfgrib
class TestCfGrib:
def test_read(self):
expected = {
"number": 2,
"time": 3,
"isobaricInhPa": 2,
"latitude": 3,
"longitude": 4,
}
with open_example_dataset("example.grib", engine="cfgrib") as ds:
assert ds.dims == expected
assert list(ds.data_vars) == ["z", "t"]
assert ds["z"].min() == 12660.0
def test_read_filter_by_keys(self):
kwargs = {"filter_by_keys": {"shortName": "t"}}
expected = {
"number": 2,
"time": 3,
"isobaricInhPa": 2,
"latitude": 3,
"longitude": 4,
}
with open_example_dataset(
"example.grib", engine="cfgrib", backend_kwargs=kwargs
) as ds:
assert ds.dims == expected
assert list(ds.data_vars) == ["t"]
assert ds["t"].min() == 231.0
@requires_pseudonetcdf
@pytest.mark.filterwarnings("ignore:IOAPI_ISPH is assumed to be 6370000")
class TestPseudoNetCDFFormat:
def open(self, path, **kwargs):
return open_dataset(path, engine="pseudonetcdf", **kwargs)
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path:
self.save(data, path, **save_kwargs)
with self.open(path, **open_kwargs) as ds:
yield ds
def test_ict_format(self):
"""
        Open an ICARTT (ffi1001) file and test data variables
"""
ictfile = open_example_dataset(
"example.ict", engine="pseudonetcdf", backend_kwargs={"format": "ffi1001"}
)
stdattr = {
"fill_value": -9999.0,
"missing_value": -9999,
"scale": 1,
"llod_flag": -8888,
"llod_value": "N/A",
"ulod_flag": -7777,
"ulod_value": "N/A",
}
def myatts(**attrs):
outattr = stdattr.copy()
outattr.update(attrs)
return outattr
input = {
"coords": {},
"attrs": {
"fmt": "1001",
"n_header_lines": 27,
"PI_NAME": "Henderson, Barron",
"ORGANIZATION_NAME": "U.S. EPA",
"SOURCE_DESCRIPTION": "Example file with artificial data",
"MISSION_NAME": "JUST_A_TEST",
"VOLUME_INFO": "1, 1",
"SDATE": "2018, 04, 27",
"WDATE": "2018, 04, 27",
"TIME_INTERVAL": "0",
"INDEPENDENT_VARIABLE": "Start_UTC",
"ULOD_FLAG": "-7777",
"ULOD_VALUE": "N/A",
"LLOD_FLAG": "-8888",
"LLOD_VALUE": ("N/A, N/A, N/A, N/A, 0.025"),
"OTHER_COMMENTS": (
"www-air.larc.nasa.gov/missions/etc/" + "IcarttDataFormat.htm"
),
"REVISION": "R0",
"R0": "No comments for this revision.",
"TFLAG": "Start_UTC",
},
"dims": {"POINTS": 4},
"data_vars": {
"Start_UTC": {
"data": [43200.0, 46800.0, 50400.0, 50400.0],
"dims": ("POINTS",),
"attrs": myatts(units="Start_UTC", standard_name="Start_UTC"),
},
"lat": {
"data": [41.0, 42.0, 42.0, 42.0],
"dims": ("POINTS",),
"attrs": myatts(units="degrees_north", standard_name="lat"),
},
"lon": {
"data": [-71.0, -72.0, -73.0, -74.0],
"dims": ("POINTS",),
"attrs": myatts(units="degrees_east", standard_name="lon"),
},
"elev": {
"data": [5.0, 15.0, 20.0, 25.0],
"dims": ("POINTS",),
"attrs": myatts(units="meters", standard_name="elev"),
},
"TEST_ppbv": {
"data": [1.2345, 2.3456, 3.4567, 4.5678],
"dims": ("POINTS",),
"attrs": myatts(units="ppbv", standard_name="TEST_ppbv"),
},
"TESTM_ppbv": {
"data": [2.22, -9999.0, -7777.0, -8888.0],
"dims": ("POINTS",),
"attrs": myatts(
units="ppbv", standard_name="TESTM_ppbv", llod_value=0.025
),
},
},
}
chkfile = Dataset.from_dict(input)
assert_identical(ictfile, chkfile)
def test_ict_format_write(self):
fmtkw = {"format": "ffi1001"}
expected = open_example_dataset(
"example.ict", engine="pseudonetcdf", backend_kwargs=fmtkw
)
with self.roundtrip(
expected, save_kwargs=fmtkw, open_kwargs={"backend_kwargs": fmtkw}
) as actual:
assert_identical(expected, actual)
def test_uamiv_format_read(self):
"""
Open a CAMx file and test data variables
"""
camxfile = open_example_dataset(
"example.uamiv", engine="pseudonetcdf", backend_kwargs={"format": "uamiv"}
)
data = np.arange(20, dtype="f").reshape(1, 1, 4, 5)
expected = xr.Variable(
("TSTEP", "LAY", "ROW", "COL"),
data,
dict(units="ppm", long_name="O3".ljust(16), var_desc="O3".ljust(80)),
)
actual = camxfile.variables["O3"]
assert_allclose(expected, actual)
data = np.array([[[2002154, 0]]], dtype="i")
expected = xr.Variable(
("TSTEP", "VAR", "DATE-TIME"),
data,
dict(
long_name="TFLAG".ljust(16),
var_desc="TFLAG".ljust(80),
units="DATE-TIME".ljust(16),
),
)
actual = camxfile.variables["TFLAG"]
assert_allclose(expected, actual)
camxfile.close()
@requires_dask
def test_uamiv_format_mfread(self):
"""
        Open multiple CAMx files and test data variables
"""
camxfile = open_example_mfdataset(
["example.uamiv", "example.uamiv"],
engine="pseudonetcdf",
concat_dim="TSTEP",
combine="nested",
backend_kwargs={"format": "uamiv"},
)
data1 = np.arange(20, dtype="f").reshape(1, 1, 4, 5)
data = np.concatenate([data1] * 2, axis=0)
expected = xr.Variable(
("TSTEP", "LAY", "ROW", "COL"),
data,
dict(units="ppm", long_name="O3".ljust(16), var_desc="O3".ljust(80)),
)
actual = camxfile.variables["O3"]
assert_allclose(expected, actual)
data = np.array([[[2002154, 0]]], dtype="i").repeat(2, 0)
attrs = dict(
long_name="TFLAG".ljust(16),
var_desc="TFLAG".ljust(80),
units="DATE-TIME".ljust(16),
)
dims = ("TSTEP", "VAR", "DATE-TIME")
expected = xr.Variable(dims, data, attrs)
actual = camxfile.variables["TFLAG"]
assert_allclose(expected, actual)
camxfile.close()
@pytest.mark.xfail(reason="Flaky; see GH3711")
def test_uamiv_format_write(self):
fmtkw = {"format": "uamiv"}
expected = open_example_dataset(
"example.uamiv", engine="pseudonetcdf", backend_kwargs=fmtkw
)
with self.roundtrip(
expected,
save_kwargs=fmtkw,
open_kwargs={"backend_kwargs": fmtkw},
allow_cleanup_failure=True,
) as actual:
assert_identical(expected, actual)
expected.close()
def save(self, dataset, path, **save_kwargs):
import PseudoNetCDF as pnc
pncf = pnc.PseudoNetCDFFile()
pncf.dimensions = {
k: pnc.PseudoNetCDFDimension(pncf, k, v) for k, v in dataset.dims.items()
}
pncf.variables = {
k: pnc.PseudoNetCDFVariable(
pncf, k, v.dtype.char, v.dims, values=v.data[...], **v.attrs
)
for k, v in dataset.variables.items()
}
for pk, pv in dataset.attrs.items():
setattr(pncf, pk, pv)
pnc.pncwrite(pncf, path, **save_kwargs)
@requires_rasterio
@contextlib.contextmanager
def create_tmp_geotiff(
nx=4,
ny=3,
nz=3,
transform=None,
transform_args=default_value,
crs=default_value,
open_kwargs=None,
additional_attrs=None,
):
if transform_args is default_value:
transform_args = [5000, 80000, 1000, 2000.0]
if crs is default_value:
crs = {
"units": "m",
"no_defs": True,
"ellps": "WGS84",
"proj": "utm",
"zone": 18,
}
# yields a temporary geotiff file and a corresponding expected DataArray
import rasterio
from rasterio.transform import from_origin
if open_kwargs is None:
open_kwargs = {}
with create_tmp_file(suffix=".tif", allow_cleanup_failure=ON_WINDOWS) as tmp_file:
# allow 2d or 3d shapes
if nz == 1:
data_shape = ny, nx
write_kwargs = {"indexes": 1}
else:
data_shape = nz, ny, nx
write_kwargs = {}
data = np.arange(nz * ny * nx, dtype=rasterio.float32).reshape(*data_shape)
if transform is None:
transform = from_origin(*transform_args)
if additional_attrs is None:
additional_attrs = {
"descriptions": tuple("d{}".format(n + 1) for n in range(nz)),
"units": tuple("u{}".format(n + 1) for n in range(nz)),
}
with rasterio.open(
tmp_file,
"w",
driver="GTiff",
height=ny,
width=nx,
count=nz,
crs=crs,
transform=transform,
dtype=rasterio.float32,
**open_kwargs,
) as s:
for attr, val in additional_attrs.items():
setattr(s, attr, val)
s.write(data, **write_kwargs)
dx, dy = s.res[0], -s.res[1]
a, b, c, d = transform_args
data = data[np.newaxis, ...] if nz == 1 else data
expected = DataArray(
data,
dims=("band", "y", "x"),
coords={
"band": np.arange(nz) + 1,
"y": -np.arange(ny) * d + b + dy / 2,
"x": np.arange(nx) * c + a + dx / 2,
},
)
yield tmp_file, expected
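# Note (descriptive comment): the expected coordinates built in
# create_tmp_geotiff above are pixel centres derived from the affine transform.
# With from_origin(west, north, xsize, ysize) and transform_args = [a, b, c, d],
# this works out to x = a + c * (col + 0.5) and y = b - d * (row + 0.5), which
# is what the ``+ dx / 2`` and ``+ dy / 2`` offsets express.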
@requires_rasterio
class TestRasterio:
@requires_scipy_or_netCDF4
def test_serialization(self):
with create_tmp_geotiff(additional_attrs={}) as (tmp_file, expected):
# Write it to a netcdf and read again (roundtrip)
with xr.open_rasterio(tmp_file) as rioda:
with create_tmp_file(suffix=".nc") as tmp_nc_file:
rioda.to_netcdf(tmp_nc_file)
with xr.open_dataarray(tmp_nc_file) as ncds:
assert_identical(rioda, ncds)
def test_utm(self):
with create_tmp_geotiff() as (tmp_file, expected):
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert rioda.attrs["scales"] == (1.0, 1.0, 1.0)
assert rioda.attrs["offsets"] == (0.0, 0.0, 0.0)
assert rioda.attrs["descriptions"] == ("d1", "d2", "d3")
assert rioda.attrs["units"] == ("u1", "u2", "u3")
assert isinstance(rioda.attrs["crs"], str)
assert isinstance(rioda.attrs["res"], tuple)
assert isinstance(rioda.attrs["is_tiled"], np.uint8)
assert isinstance(rioda.attrs["transform"], tuple)
assert len(rioda.attrs["transform"]) == 6
np.testing.assert_array_equal(
rioda.attrs["nodatavals"], [np.NaN, np.NaN, np.NaN]
)
# Check no parse coords
with xr.open_rasterio(tmp_file, parse_coordinates=False) as rioda:
assert "x" not in rioda.coords
assert "y" not in rioda.coords
def test_non_rectilinear(self):
from rasterio.transform import from_origin
# Create a geotiff file with 2d coordinates
with create_tmp_geotiff(
transform=from_origin(0, 3, 1, 1).rotation(45), crs=None
) as (tmp_file, _):
# Default is to not parse coords
with xr.open_rasterio(tmp_file) as rioda:
assert "x" not in rioda.coords
assert "y" not in rioda.coords
assert "crs" not in rioda.attrs
assert rioda.attrs["scales"] == (1.0, 1.0, 1.0)
assert rioda.attrs["offsets"] == (0.0, 0.0, 0.0)
assert rioda.attrs["descriptions"] == ("d1", "d2", "d3")
assert rioda.attrs["units"] == ("u1", "u2", "u3")
assert isinstance(rioda.attrs["res"], tuple)
assert isinstance(rioda.attrs["is_tiled"], np.uint8)
assert isinstance(rioda.attrs["transform"], tuple)
assert len(rioda.attrs["transform"]) == 6
# See if a warning is raised if we force it
with pytest.warns(Warning, match="transformation isn't rectilinear"):
with xr.open_rasterio(tmp_file, parse_coordinates=True) as rioda:
assert "x" not in rioda.coords
assert "y" not in rioda.coords
def test_platecarree(self):
with create_tmp_geotiff(
8,
10,
1,
transform_args=[1, 2, 0.5, 2.0],
crs="+proj=latlong",
open_kwargs={"nodata": -9765},
) as (tmp_file, expected):
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert rioda.attrs["scales"] == (1.0,)
assert rioda.attrs["offsets"] == (0.0,)
assert isinstance(rioda.attrs["descriptions"], tuple)
assert isinstance(rioda.attrs["units"], tuple)
assert isinstance(rioda.attrs["crs"], str)
assert isinstance(rioda.attrs["res"], tuple)
assert isinstance(rioda.attrs["is_tiled"], np.uint8)
assert isinstance(rioda.attrs["transform"], tuple)
assert len(rioda.attrs["transform"]) == 6
np.testing.assert_array_equal(rioda.attrs["nodatavals"], [-9765.0])
def test_notransform(self):
# regression test for https://github.com/pydata/xarray/issues/1686
import rasterio
import warnings
# Create a geotiff file
with warnings.catch_warnings():
# rasterio throws a NotGeoreferencedWarning here, which is
# expected since we test rasterio's defaults in this case.
warnings.filterwarnings(
"ignore",
category=UserWarning,
message="Dataset has no geotransform set",
)
with create_tmp_file(suffix=".tif") as tmp_file:
# data
nx, ny, nz = 4, 3, 3
data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(
nz, ny, nx
)
with rasterio.open(
tmp_file,
"w",
driver="GTiff",
height=ny,
width=nx,
count=nz,
dtype=rasterio.float32,
) as s:
s.descriptions = ("nx", "ny", "nz")
s.units = ("cm", "m", "km")
s.write(data)
# Tests
expected = DataArray(
data,
dims=("band", "y", "x"),
coords={
"band": [1, 2, 3],
"y": [0.5, 1.5, 2.5],
"x": [0.5, 1.5, 2.5, 3.5],
},
)
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert rioda.attrs["scales"] == (1.0, 1.0, 1.0)
assert rioda.attrs["offsets"] == (0.0, 0.0, 0.0)
assert rioda.attrs["descriptions"] == ("nx", "ny", "nz")
assert rioda.attrs["units"] == ("cm", "m", "km")
assert isinstance(rioda.attrs["res"], tuple)
assert isinstance(rioda.attrs["is_tiled"], np.uint8)
assert isinstance(rioda.attrs["transform"], tuple)
assert len(rioda.attrs["transform"]) == 6
def test_indexing(self):
with create_tmp_geotiff(
8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
) as (tmp_file, expected):
with xr.open_rasterio(tmp_file, cache=False) as actual:
# tests
# assert_allclose checks all data + coordinates
assert_allclose(actual, expected)
assert not actual.variable._in_memory
# Basic indexer
ind = {"x": slice(2, 5), "y": slice(5, 7)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {"band": slice(1, 2), "x": slice(2, 5), "y": slice(5, 7)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {"band": slice(1, 2), "x": slice(2, 5), "y": 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# orthogonal indexer
ind = {
"band": np.array([2, 1, 0]),
"x": np.array([1, 0]),
"y": np.array([0, 2]),
}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {"band": np.array([2, 1, 0]), "x": np.array([1, 0]), "y": 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {"band": 0, "x": np.array([0, 0]), "y": np.array([1, 1, 1])}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# minus-stepped slice
ind = {"band": np.array([2, 1, 0]), "x": slice(-1, None, -1), "y": 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {"band": np.array([2, 1, 0]), "x": 1, "y": slice(-1, 1, -2)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# empty selection
ind = {"band": np.array([2, 1, 0]), "x": 1, "y": slice(2, 2, 1)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {"band": slice(0, 0), "x": 1, "y": 2}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# vectorized indexer
ind = {
"band": DataArray([2, 1, 0], dims="a"),
"x": DataArray([1, 0, 0], dims="a"),
"y": np.array([0, 2]),
}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {
"band": DataArray([[2, 1, 0], [1, 0, 2]], dims=["a", "b"]),
"x": DataArray([[1, 0, 0], [0, 1, 0]], dims=["a", "b"]),
"y": 0,
}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# Selecting lists of bands is fine
ex = expected.isel(band=[1, 2])
ac = actual.isel(band=[1, 2])
assert_allclose(ac, ex)
ex = expected.isel(band=[0, 2])
ac = actual.isel(band=[0, 2])
assert_allclose(ac, ex)
# Integer indexing
ex = expected.isel(band=1)
ac = actual.isel(band=1)
assert_allclose(ac, ex)
ex = expected.isel(x=1, y=2)
ac = actual.isel(x=1, y=2)
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=1, y=2)
ac = actual.isel(band=0, x=1, y=2)
assert_allclose(ac, ex)
# Mixed
ex = actual.isel(x=slice(2), y=slice(2))
ac = actual.isel(x=[0, 1], y=[0, 1])
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=1, y=slice(5, 7))
ac = actual.isel(band=0, x=1, y=slice(5, 7))
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=slice(2, 5), y=2)
ac = actual.isel(band=0, x=slice(2, 5), y=2)
assert_allclose(ac, ex)
# One-element lists
ex = expected.isel(band=[0], x=slice(2, 5), y=[2])
ac = actual.isel(band=[0], x=slice(2, 5), y=[2])
assert_allclose(ac, ex)
def test_caching(self):
with create_tmp_geotiff(
8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
) as (tmp_file, expected):
# Cache is the default
with xr.open_rasterio(tmp_file) as actual:
# This should cache everything
assert_allclose(actual, expected)
# once cached, non-windowed indexing should become possible
ac = actual.isel(x=[2, 4])
ex = expected.isel(x=[2, 4])
assert_allclose(ac, ex)
@requires_dask
def test_chunks(self):
with create_tmp_geotiff(
8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
) as (tmp_file, expected):
# Chunk at open time
with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:
import dask.array as da
assert isinstance(actual.data, da.Array)
assert "open_rasterio" in actual.data.name
# do some arithmetic
ac = actual.mean()
ex = expected.mean()
assert_allclose(ac, ex)
ac = actual.sel(band=1).mean(dim="x")
ex = expected.sel(band=1).mean(dim="x")
assert_allclose(ac, ex)
@pytest.mark.xfail(
not has_dask, reason="without dask, a non-serializable lock is used"
)
def test_pickle_rasterio(self):
# regression test for https://github.com/pydata/xarray/issues/2121
with create_tmp_geotiff() as (tmp_file, expected):
with xr.open_rasterio(tmp_file) as rioda:
temp = pickle.dumps(rioda)
with pickle.loads(temp) as actual:
assert_equal(actual, rioda)
def test_ENVI_tags(self):
rasterio = pytest.importorskip("rasterio", minversion="1.0a")
from rasterio.transform import from_origin
# Create an ENVI file with some tags in the ENVI namespace
# this test uses a custom driver, so we can't use create_tmp_geotiff
with create_tmp_file(suffix=".dat") as tmp_file:
# data
nx, ny, nz = 4, 3, 3
data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx)
transform = from_origin(5000, 80000, 1000, 2000.0)
with rasterio.open(
tmp_file,
"w",
driver="ENVI",
height=ny,
width=nx,
count=nz,
crs={
"units": "m",
"no_defs": True,
"ellps": "WGS84",
"proj": "utm",
"zone": 18,
},
transform=transform,
dtype=rasterio.float32,
) as s:
s.update_tags(
ns="ENVI",
description="{Tagged file}",
wavelength="{123.000000, 234.234000, 345.345678}",
fwhm="{1.000000, 0.234000, 0.000345}",
)
s.write(data)
dx, dy = s.res[0], -s.res[1]
# Tests
coords = {
"band": [1, 2, 3],
"y": -np.arange(ny) * 2000 + 80000 + dy / 2,
"x": np.arange(nx) * 1000 + 5000 + dx / 2,
"wavelength": ("band", np.array([123, 234.234, 345.345678])),
"fwhm": ("band", np.array([1, 0.234, 0.000345])),
}
expected = DataArray(data, dims=("band", "y", "x"), coords=coords)
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert isinstance(rioda.attrs["crs"], str)
assert isinstance(rioda.attrs["res"], tuple)
assert isinstance(rioda.attrs["is_tiled"], np.uint8)
assert isinstance(rioda.attrs["transform"], tuple)
assert len(rioda.attrs["transform"]) == 6
# from ENVI tags
assert isinstance(rioda.attrs["description"], str)
assert isinstance(rioda.attrs["map_info"], str)
assert isinstance(rioda.attrs["samples"], str)
def test_geotiff_tags(self):
# Create a geotiff file with some tags
with create_tmp_geotiff() as (tmp_file, _):
with xr.open_rasterio(tmp_file) as rioda:
assert isinstance(rioda.attrs["AREA_OR_POINT"], str)
@requires_dask
def test_no_mftime(self):
# rasterio can accept "filename" arguments that are actually urls,
# including paths to remote files.
# In issue #1816, we found that these caused dask to break, because
# the modification time was used to determine the dask token. This
# test ensures we can still chunk such files when reading with
# rasterio.
with create_tmp_geotiff(
8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
) as (tmp_file, expected):
with mock.patch("os.path.getmtime", side_effect=OSError):
with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:
import dask.array as da
assert isinstance(actual.data, da.Array)
assert_allclose(actual, expected)
@network
def test_http_url(self):
# more examples urls here
# http://download.osgeo.org/geotiff/samples/
url = "http://download.osgeo.org/geotiff/samples/made_up/ntf_nord.tif"
with xr.open_rasterio(url) as actual:
assert actual.shape == (1, 512, 512)
# make sure chunking works
with xr.open_rasterio(url, chunks=(1, 256, 256)) as actual:
import dask.array as da
assert isinstance(actual.data, da.Array)
def test_rasterio_environment(self):
import rasterio
with create_tmp_geotiff() as (tmp_file, expected):
# Should fail with error since suffix not allowed
with pytest.raises(Exception):
with rasterio.Env(GDAL_SKIP="GTiff"):
with xr.open_rasterio(tmp_file) as actual:
assert_allclose(actual, expected)
@pytest.mark.xfail(reason="rasterio 1.1.1 is broken. GH3573")
def test_rasterio_vrt(self):
import rasterio
# tmp_file default crs is UTM: CRS({'init': 'epsg:32618'})
with create_tmp_geotiff() as (tmp_file, expected):
with rasterio.open(tmp_file) as src:
with rasterio.vrt.WarpedVRT(src, crs="epsg:4326") as vrt:
expected_shape = (vrt.width, vrt.height)
expected_crs = vrt.crs
expected_res = vrt.res
# Value of single pixel in center of image
lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)
expected_val = next(vrt.sample([(lon, lat)]))
with xr.open_rasterio(vrt) as da:
actual_shape = (da.sizes["x"], da.sizes["y"])
actual_crs = da.crs
actual_res = da.res
actual_val = da.sel(dict(x=lon, y=lat), method="nearest").data
assert actual_crs == expected_crs
assert actual_res == expected_res
assert actual_shape == expected_shape
assert expected_val.all() == actual_val.all()
def test_rasterio_vrt_with_transform_and_size(self):
# Test open_rasterio() support of WarpedVRT with transform, width and
# height (issue #2864)
import rasterio
from rasterio.warp import calculate_default_transform
from affine import Affine
with create_tmp_geotiff() as (tmp_file, expected):
with rasterio.open(tmp_file) as src:
# Estimate the transform, width and height
# for a change of resolution
# tmp_file initial res is (1000,2000) (default values)
trans, w, h = calculate_default_transform(
src.crs, src.crs, src.width, src.height, resolution=500, *src.bounds
)
with rasterio.vrt.WarpedVRT(
src, transform=trans, width=w, height=h
) as vrt:
expected_shape = (vrt.width, vrt.height)
expected_res = vrt.res
expected_transform = vrt.transform
with xr.open_rasterio(vrt) as da:
actual_shape = (da.sizes["x"], da.sizes["y"])
actual_res = da.res
actual_transform = Affine(*da.transform)
assert actual_res == expected_res
assert actual_shape == expected_shape
assert actual_transform == expected_transform
def test_rasterio_vrt_with_src_crs(self):
# Test open_rasterio() support of WarpedVRT with specified src_crs
import rasterio
# create geotiff with no CRS and specify it manually
with create_tmp_geotiff(crs=None) as (tmp_file, expected):
src_crs = rasterio.crs.CRS({"init": "epsg:32618"})
with rasterio.open(tmp_file) as src:
assert src.crs is None
with rasterio.vrt.WarpedVRT(src, src_crs=src_crs) as vrt:
with xr.open_rasterio(vrt) as da:
assert da.crs == src_crs
@network
def test_rasterio_vrt_network(self):
# Make sure loading w/ rasterio give same results as xarray
import rasterio
# use same url that rasterio package uses in tests
prefix = "https://landsat-pds.s3.amazonaws.com/L8/139/045/"
image = "LC81390452014295LGN00/LC81390452014295LGN00_B1.TIF"
httpstif = prefix + image
with rasterio.Env(aws_unsigned=True):
with rasterio.open(httpstif) as src:
with rasterio.vrt.WarpedVRT(src, crs="epsg:4326") as vrt:
expected_shape = vrt.width, vrt.height
expected_res = vrt.res
# Value of single pixel in center of image
lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)
expected_val = next(vrt.sample([(lon, lat)]))
with xr.open_rasterio(vrt) as da:
actual_shape = da.sizes["x"], da.sizes["y"]
actual_res = da.res
actual_val = da.sel(dict(x=lon, y=lat), method="nearest").data
assert actual_shape == expected_shape
assert actual_res == expected_res
assert expected_val == actual_val
class TestEncodingInvalid:
def test_extract_nc4_variable_encoding(self):
var = xr.Variable(("x",), [1, 2, 3], {}, {"foo": "bar"})
with raises_regex(ValueError, "unexpected encoding"):
_extract_nc4_variable_encoding(var, raise_on_invalid=True)
var = xr.Variable(("x",), [1, 2, 3], {}, {"chunking": (2, 1)})
encoding = _extract_nc4_variable_encoding(var)
assert {} == encoding
# regression test
var = xr.Variable(("x",), [1, 2, 3], {}, {"shuffle": True})
encoding = _extract_nc4_variable_encoding(var, raise_on_invalid=True)
assert {"shuffle": True} == encoding
# Variables with unlim dims must be chunked on output.
var = xr.Variable(("x",), [1, 2, 3], {}, {"contiguous": True})
encoding = _extract_nc4_variable_encoding(var, unlimited_dims=("x",))
assert {} == encoding
def test_extract_h5nc_encoding(self):
# not supported with h5netcdf (yet)
var = xr.Variable(("x",), [1, 2, 3], {}, {"least_sigificant_digit": 2})
with raises_regex(ValueError, "unexpected encoding"):
_extract_nc4_variable_encoding(var, raise_on_invalid=True)
class MiscObject:
pass
@requires_netCDF4
class TestValidateAttrs:
def test_validating_attrs(self):
def new_dataset():
return Dataset({"data": ("y", np.arange(10.0))}, {"y": np.arange(10)})
def new_dataset_and_dataset_attrs():
ds = new_dataset()
return ds, ds.attrs
def new_dataset_and_data_attrs():
ds = new_dataset()
return ds, ds.data.attrs
def new_dataset_and_coord_attrs():
ds = new_dataset()
return ds, ds.coords["y"].attrs
for new_dataset_and_attrs in [
new_dataset_and_dataset_attrs,
new_dataset_and_data_attrs,
new_dataset_and_coord_attrs,
]:
ds, attrs = new_dataset_and_attrs()
attrs[123] = "test"
with raises_regex(TypeError, "Invalid name for attr"):
ds.to_netcdf("test.nc")
ds, attrs = new_dataset_and_attrs()
attrs[MiscObject()] = "test"
with raises_regex(TypeError, "Invalid name for attr"):
ds.to_netcdf("test.nc")
ds, attrs = new_dataset_and_attrs()
attrs[""] = "test"
with raises_regex(ValueError, "Invalid name for attr"):
ds.to_netcdf("test.nc")
# This one should work
ds, attrs = new_dataset_and_attrs()
attrs["test"] = "test"
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = {"a": 5}
with raises_regex(TypeError, "Invalid value for attr"):
ds.to_netcdf("test.nc")
ds, attrs = new_dataset_and_attrs()
attrs["test"] = MiscObject()
with raises_regex(TypeError, "Invalid value for attr"):
ds.to_netcdf("test.nc")
ds, attrs = new_dataset_and_attrs()
attrs["test"] = 5
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = 3.14
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = [1, 2, 3, 4]
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = (1.9, 2.5)
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = np.arange(5)
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = "This is a string"
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
ds, attrs = new_dataset_and_attrs()
attrs["test"] = ""
with create_tmp_file() as tmp_file:
ds.to_netcdf(tmp_file)
@requires_scipy_or_netCDF4
class TestDataArrayToNetCDF:
def test_dataarray_to_netcdf_no_name(self):
original_da = DataArray(np.arange(12).reshape((3, 4)))
with create_tmp_file() as tmp:
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
assert_identical(original_da, loaded_da)
def test_dataarray_to_netcdf_with_name(self):
original_da = DataArray(np.arange(12).reshape((3, 4)), name="test")
with create_tmp_file() as tmp:
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
assert_identical(original_da, loaded_da)
def test_dataarray_to_netcdf_coord_name_clash(self):
original_da = DataArray(
np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x"
)
with create_tmp_file() as tmp:
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
assert_identical(original_da, loaded_da)
def test_open_dataarray_options(self):
data = DataArray(np.arange(5), coords={"y": ("x", range(5))}, dims=["x"])
with create_tmp_file() as tmp:
data.to_netcdf(tmp)
expected = data.drop_vars("y")
with open_dataarray(tmp, drop_variables=["y"]) as loaded:
assert_identical(expected, loaded)
@requires_scipy
def test_dataarray_to_netcdf_return_bytes(self):
# regression test for GH1410
data = xr.DataArray([1, 2, 3])
output = data.to_netcdf()
assert isinstance(output, bytes)
def test_dataarray_to_netcdf_no_name_pathlib(self):
original_da = DataArray(np.arange(12).reshape((3, 4)))
with create_tmp_file() as tmp:
tmp = Path(tmp)
original_da.to_netcdf(tmp)
with open_dataarray(tmp) as loaded_da:
assert_identical(original_da, loaded_da)
@requires_scipy_or_netCDF4
def test_no_warning_from_dask_effective_get():
with create_tmp_file() as tmpfile:
with pytest.warns(None) as record:
ds = Dataset()
ds.to_netcdf(tmpfile)
assert len(record) == 0
@requires_scipy_or_netCDF4
def test_source_encoding_always_present():
# Test for GH issue #2550.
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp) as ds:
assert ds.encoding["source"] == tmp
def _assert_no_dates_out_of_range_warning(record):
undesired_message = "dates out of range"
for warning in record:
assert undesired_message not in str(warning.message)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_standard_calendar_default_in_range(calendar):
x = [0, 1]
time = [0, 720]
units_date = "2000-01-01"
units = "days since 2000-01-01"
original = DataArray(x, [("time", time)], name="x")
original = original.to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
x_timedeltas = np.array(x).astype("timedelta64[D]")
time_timedeltas = np.array(time).astype("timedelta64[D]")
decoded_x = np.datetime64(units_date, "ns") + x_timedeltas
decoded_time = np.datetime64(units_date, "ns") + time_timedeltas
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.warns(None) as record:
with open_dataset(tmp_file) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
_assert_no_dates_out_of_range_warning(record)
@requires_cftime
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2500])
def test_use_cftime_standard_calendar_default_out_of_range(calendar, units_year):
import cftime
x = [0, 1]
time = [0, 720]
units = f"days since {units_year}-01-01"
original = DataArray(x, [("time", time)], name="x")
original = original.to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True)
decoded_time = cftime.num2date(
time, units, calendar, only_use_cftime_datetimes=True
)
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.warns(SerializationWarning):
with open_dataset(tmp_file) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
@requires_cftime
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_true(calendar, units_year):
import cftime
x = [0, 1]
time = [0, 720]
units = f"days since {units_year}-01-01"
original = DataArray(x, [("time", time)], name="x")
original = original.to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True)
decoded_time = cftime.num2date(
time, units, calendar, only_use_cftime_datetimes=True
)
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.warns(None) as record:
with open_dataset(tmp_file, use_cftime=True) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
_assert_no_dates_out_of_range_warning(record)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_false_standard_calendar_in_range(calendar):
x = [0, 1]
time = [0, 720]
units_date = "2000-01-01"
units = "days since 2000-01-01"
original = DataArray(x, [("time", time)], name="x")
original = original.to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
x_timedeltas = np.array(x).astype("timedelta64[D]")
time_timedeltas = np.array(time).astype("timedelta64[D]")
decoded_x = np.datetime64(units_date, "ns") + x_timedeltas
decoded_time = np.datetime64(units_date, "ns") + time_timedeltas
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.warns(None) as record:
with open_dataset(tmp_file, use_cftime=False) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
_assert_no_dates_out_of_range_warning(record)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2500])
def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year):
x = [0, 1]
time = [0, 720]
units = f"days since {units_year}-01-01"
original = DataArray(x, [("time", time)], name="x")
original = original.to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.raises((OutOfBoundsDatetime, ValueError)):
open_dataset(tmp_file, use_cftime=False)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_false_nonstandard_calendar(calendar, units_year):
x = [0, 1]
time = [0, 720]
units = f"days since {units_year}"
original = DataArray(x, [("time", time)], name="x")
original = original.to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.raises((OutOfBoundsDatetime, ValueError)):
open_dataset(tmp_file, use_cftime=False)
@pytest.mark.parametrize("engine", ["netcdf4", "scipy"])
def test_invalid_netcdf_raises(engine):
data = create_test_data()
with raises_regex(ValueError, "unrecognized option 'invalid_netcdf'"):
data.to_netcdf("foo.nc", engine=engine, invalid_netcdf=True)
@requires_zarr
def test_encode_zarr_attr_value():
# array -> list
arr = np.array([1, 2, 3])
expected = [1, 2, 3]
actual = backends.zarr.encode_zarr_attr_value(arr)
assert isinstance(actual, list)
assert actual == expected
# scalar array -> scalar
sarr = np.array(1)[()]
expected = 1
actual = backends.zarr.encode_zarr_attr_value(sarr)
assert isinstance(actual, int)
assert actual == expected
# string -> string (no change)
expected = "foo"
actual = backends.zarr.encode_zarr_attr_value(expected)
assert isinstance(actual, str)
assert actual == expected
@requires_zarr
def test_extract_zarr_variable_encoding():
var = xr.Variable("x", [1, 2])
actual = backends.zarr.extract_zarr_variable_encoding(var)
assert "chunks" in actual
assert actual["chunks"] is None
var = xr.Variable("x", [1, 2], encoding={"chunks": (1,)})
actual = backends.zarr.extract_zarr_variable_encoding(var)
assert actual["chunks"] == (1,)
# does not raise on invalid
var = xr.Variable("x", [1, 2], encoding={"foo": (1,)})
actual = backends.zarr.extract_zarr_variable_encoding(var)
# raises on invalid
var = xr.Variable("x", [1, 2], encoding={"foo": (1,)})
with raises_regex(ValueError, "unexpected encoding parameters"):
actual = backends.zarr.extract_zarr_variable_encoding(
var, raise_on_invalid=True
)
|
from datetime import datetime, timedelta
from flask import Flask, render_template, redirect, url_for, request, make_response
import os
import ConfigParser
import logging
from logging.handlers import RotatingFileHandler
import re
import werkzeug
import operator
import time
#import ledz
#http://stackoverflow.com/questions/20646822/how-to-serve-static-files-in-flask
app = Flask(__name__, static_url_path='')
Config = ConfigParser.ConfigParser()
ROOTDIR = None
LOGFILE = None
HTTPPORT = None
ISPROD = False
#static pages cache (to avoid reading from disk each time)
StaticPagesCache = dict()
#Werkzeug and Flask global constants
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'ico'])
##########################################################################################
#Check file name to upload
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
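#Illustrative examples (hypothetical filenames, not from the original source): with the
#ALLOWED_EXTENSIONS set above, allowed_file("photo.JPG") returns True (the extension
#check is case-insensitive), while allowed_file("script.js") and allowed_file("noextension") return False.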
##########################################################################################
#Interprets the Wiki tags and translates them to HTML
def wikilize(html):
#jauge percentage
html = html.replace("[[0%]]", r"""<img src="/files/jauger0.gif"/>""")
html = html.replace("[[25%]]", r"""<img src="/files/jauger1.gif"/>""")
html = html.replace("[[50%]]", r"""<img src="/files/jauger2.gif"/>""")
html = html.replace("[[75%]]", r"""<img src="/files/jauger3.gif"/>""")
html = html.replace("[[100%]]", r"""<img src="/files/jauger4.gif"/>""")
#links
link = r"((?<!\<code\>)\[\[([^<].+?) \s*([|] \s* (.+?) \s*)?]])"
compLink = re.compile(link, re.X | re.U)
for i in compLink.findall(html):
title = [i[-1] if i[-1] else i[1]][0]
url = i[1]
if not url.startswith("http://") and not url.startswith("https://"):
url = "/" + url.lower() + ".html"
formattedLink = u"<a href='{0}'>{1}</a>".format(url, title)
html = re.sub(compLink, formattedLink, html, count=1)
#bold and italic
link = r"(//\*\*(.[^*]+)\*\*//)"
compLink = re.compile(link, re.X | re.U)
for i in compLink.findall(html):
url = i[1]
formattedLink = u"<i><b>{0}</b></i>".format(url)
html = re.sub(compLink, formattedLink, html, count=1)
#italic and bold
link = r"(\*\*//(.[^/]+)//\*\*)"
compLink = re.compile(link, re.X | re.U)
for i in compLink.findall(html):
url = i[1]
formattedLink = u"<i><b>{0}</b></i>".format(url)
html = re.sub(compLink, formattedLink, html, count=1)
#bold
link = r"(\*\*(.[^*]+)\*\*)"
compLink = re.compile(link, re.X | re.U)
for i in compLink.findall(html):
url = i[1]
formattedLink = u"<b>{0}</b>".format(url)
html = re.sub(compLink, formattedLink, html, count=1)
#italic
link = r"(//(.[^/]+)//)"
compLink = re.compile(link, re.X | re.U)
for i in compLink.findall(html):
url = i[1]
formattedLink = u"<i>{0}</i>".format(url)
html = re.sub(compLink, formattedLink, html, count=1)
#list item (unordered)
link = r"^\*\s+(.+)$"
compLink = re.compile(link, re.X | re.U | re.M) #need the M = multiline to detect begin/end of string
for i in compLink.findall(html):
url = i
formattedLink = u"<li>{0}</li>".format(url)
html = re.sub(compLink, formattedLink, html, count=1)
return html
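#A quick illustration of the rules above (illustrative input, not from the original source):
#wikilize("**bold**, //italic//, [[home | Home page]]") yields
#"<b>bold</b>, <i>italic</i>, <a href='/home.html'>Home page</a>",
#and a line starting with "* item" becomes "<li>item</li>".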
##########################################################################################
#store static pages (.html) in memory for faster response
def getStatic(page, vFilePath):
if page not in StaticPagesCache:
#not in cache? then add it
t = None
#read content of the static file
with open(vFilePath, mode="r") as f:
t = f.read().decode("utf-8")
#and store
StaticPagesCache[page] = t
return StaticPagesCache[page]
##########################################################################################
#default page -> redirect
@app.route('/')
@app.route('/index.html')
def homepage():
#app.logger.warning('A warning occurred (%d apples)', 42)
return redirect('/home.html')
##########################################################################################
#Login page
@app.route('/login', methods=['POST', 'GET'])
def doLogin():
if request.method == "GET":
return render_template("login01.html", pagename="login", isprod=ISPROD, message="")
else:
vLogin = request.form["login"]
vPwd = request.form["pwd"]
if vLogin == Config.get("AdminAccount", "Login") and vPwd == Config.get("AdminAccount", "Password"):
#Login is correct
resp = make_response( redirect("home.html") )
resp.set_cookie ('username', vLogin, expires=datetime.now() + timedelta(days=30))
return resp
else:
#incorrect login
return render_template("login01.html", pagename="login", isprod=ISPROD, message="Login incorrect")
##########################################################################################
#New page creation
@app.route('/new/<page>')
def newPage(page):
#not logged in? go away
if None == request.cookies.get('username'):
return redirect("/home.html")
targetFilePath = os.path.join(ROOTDIR, page.lower() + ".html")
#if page already exists just go there
if os.path.isfile(targetFilePath):
return redirect("/"+ page.lower() +".html")
#make an empty page based on template
#make sure this path is a *safe* path : get the template path from flask (at least where flask expects it)
vFilePath = \
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates') \
+ "/" \
+ Config.get("Design", "NewTemplate")
#get the new page template content
with open(vFilePath, mode="r") as f:
vBody = f.read().decode("utf-8")
#write the new page with the content and do some pattern replace
with open(targetFilePath, 'a') as fout:
os.utime(targetFilePath, None)
vBody = vBody.replace('%PAGE_NAME%', page)
fout.write(vBody)
#...and go there
return redirect("/edit/"+ page.lower())
##########################################################################################
#Edit page online
@app.route('/edit/<page>', methods=['POST', 'GET'])
def editPage(page):
#not logged in? go away
if None == request.cookies.get('username'):
return redirect("/" + page.lower() + ".html")
vBody = None
vFormContent = ""
vYear = datetime.now().strftime('%Y')
#make sure this path is a *safe* path
vFilePath = os.path.join(ROOTDIR, page.lower() + ".html")
vRecentlyUploaded = ""
#get textbox content from the form post param or from disk
if request.method == "POST" and "text" in request.form:
vFormContent = request.form["text"]
vBody = vFormContent
else:
#caching or read from disk
if Config.getboolean("WebConfig", "CACHING"):
vBody = getStatic(page.lower(), vFilePath)
else:
#read content of the static file
with open(vFilePath, mode="r") as f:
vBody = f.read().decode("utf-8")
vFormContent = vBody
#file upload
uploaded_files = request.files.getlist("imgup")
if uploaded_files:
#some files to upload
for f in uploaded_files:
if f and allowed_file(f.filename):
filename = werkzeug.utils.secure_filename(f.filename)
f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
vRecentlyUploaded = vRecentlyUploaded + '<img src="/files/'+filename+'" />'
if vRecentlyUploaded == "":
vRecentlyUploaded = "<i>none.</i>"
#Save or preview or else?
if request.method == "POST" and "SaveOrPreview" in request.form:
if request.form["SaveOrPreview"] == "Save":
#do save
if None != request.cookies.get('username') and "" != vFormContent.strip():
#save IF content is not empty and you are logged in
with open(vFilePath, mode="w") as f:
f.write(vFormContent.strip().encode("utf-8"))
#and redirect
return redirect("/" + page.lower() + ".html")
else:
if request.form["SaveOrPreview"] == "Preview":
#do Preview
pass #do nothing, stay on the page, this will just refresh the preview
#else:
#do nothing :P
#Debug
#if request.method == "POST":
# vFormContent = ",".join(request.form.keys()) + vFormContent
#generate the output by injecting static page content and a couple of variables in the template page
resp = make_response( render_template(Config.get("Design", "EditTemplate"), pagename=page, pagecontent=vBody, year=vYear, isprod=ISPROD, testout=wikilize(vFormContent), recentupload=vRecentlyUploaded))
#Debug
#if "SaveOrPreview" in request.form and request.form["SaveOrPreview"] == "Preview":
# resp.set_cookie ('username', 'xyz')
#Google Chrome version 57 of April 2017 is smarter than everyone and blocks my code. Let's tell it to stop giving me ERR_BLOCKED_BY_XSS_AUDITOR
resp.headers['X-XSS-Protection'] = '0'
return resp
##########################################################################################
#serving page through template
@app.route('/<page>.html')
@app.route('/<page>')
def serveTemplate(page):
vBody = None
#make sure this path is a *safe* path
vFilePath = os.path.join(ROOTDIR, page.lower() + ".html")
vLastupdate = time.ctime(os.path.getmtime(vFilePath))
#caching or read from disk
if Config.getboolean("WebConfig", "CACHING"):
vBody = getStatic(page.lower(), vFilePath)
else:
#read content of the static file
with open(vFilePath, mode="r") as f:
vBody = f.read().decode("utf-8")
#renders
return renderPageInternal (pPageName=page, pBody=vBody, lastupdate=str(vLastupdate))
##########################################################################################
#Search page
@app.route('/search.aspx')
def searchPage():
searchstring = request.args.get('txbSearch').lower()
vBody = "<h1>All pages containing text '%s':</h1>" % (searchstring)
resultDict = dict()
for filename in os.listdir(ROOTDIR):
if os.path.isfile(os.path.join(ROOTDIR, filename)) and filename.lower().endswith(".html"):
with open(os.path.join(ROOTDIR, filename), mode="r") as f:
t = f.read().decode("utf-8")
#count in the file and the filename
vCount = t.lower().count(searchstring) + filename.lower().count(searchstring)
#if searchstring in t:
if vCount > 0:
resultDict[filename[:-5]] = vCount
vBody = vBody + "<br/>"
sorted_x = sorted(resultDict.items(), key=operator.itemgetter(1))
vResults = ""
for t in sorted_x:
vResults = '❱ <a href="%s">%s</a> <span style="font-size:x-small;">(%d)</span><br/>' % (t[0]+".html",t[0], t[1]) + vResults
vBody = vBody + vResults
#renders
return renderPageInternal (pPageName="Search results", pBody=vBody)
#THE function that calls the page rendering and returns the result
def renderPageInternal (pPageName, pBody, lastupdate="unknown"):
# ledz.ledz_blink()
vYear = datetime.now().strftime('%Y')
#generate the output by injecting static page content and a couple of variables in the template page
return render_template(Config.get("Design", "Template"), pagename=pPageName, pagecontent=wikilize(pBody), year=vYear, isprod=ISPROD, lastupdate=lastupdate)
##########################################################################################
# Return the favicon file
@app.route('/favicon.ico')
def getFavicon():
return app.send_static_file("favicon.ico")
########################################################################################
## Main entry point
#
if __name__ == '__main__':
try:
#load config file
Config.read("electrogeek.ini")
#loading once and for all the config values
ROOTDIR = Config.get("WebConfig", "ROOTDIR")
LOGFILE = Config.get("WebConfig", "LOGFILE")
HTTPPORT = int(Config.get("WebConfig", "HTTPPORT"))
ISPROD = Config.getboolean("WebConfig", "ISPROD")
UPLOAD_FOLDER = os.path.join(ROOTDIR, 'files')
#start the logfile
#logging.basicConfig(filename="/tmp/flasklogging.log",level=logging.DEBUG)
handler = RotatingFileHandler(LOGFILE, maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
if not ISPROD:
app.debug = True
#set the upload folder
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#init the leds
# ledz.ledz_init(Config)
#start serving pages
app.run(host='0.0.0.0', port=HTTPPORT, threaded=True)
finally:
#cleanup
# ledz.ledz_finalize()
pass
|
import uuid
import datetime
from django.db import models
from users.models import User
from typing import List, Dict
from django.db.models.functions import Lower
"""
Models for the meetups
"""
class Tag(models.Model):
"""
Database Model for meetup tags
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
tag_name = models.CharField(max_length=255)
class Meta:
unique_together = ('tag_name',)
def __str__(self):
return self.tag_name
def get_tag_name(self) -> str:
"""
Returns tag name of the object
"""
return self.tag_name
class Image(models.Model):
"""
Database model for image urls for tags
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
image_url = models.URLField(max_length=255)
class Meta:
unique_together = ('image_url',)
def __str__(self):
return self.image_url
def get_image_url(self) -> str:
"""
Returns the image url
"""
return self.image_url
class Meetup(models.Model):
"""
Database Model for Meetups
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=255)
body = models.TextField()
location = models.CharField(max_length=255)
scheduled_date = models.DateTimeField()
tags = models.ManyToManyField(Tag)
image_url = models.ManyToManyField(Image)
creator = models.ForeignKey(User, on_delete=models.CASCADE)
class Meta:
ordering = ['scheduled_date', '-created_on']
unique_together = (('body', 'scheduled_date', 'location'),
('title', 'scheduled_date', 'location'))
def __str__(self):
return self.title + " on " + self.scheduled_date.strftime('%m-%d-%Y')
@property
def rsvps(self) -> List:
"""
Gets all rsvp for the meetup
"""
from meetups.serializers import FetchRsvpSerializer
rsvps = Rsvp.objects.filter(meetup=self.id)
serializer = FetchRsvpSerializer(rsvps, many=True)
return serializer.data
@property
def rsvp_summary(self) -> Dict:
"""
Gets rsvp summary
"""
maybe_count = Rsvp.objects.filter(
meetup=self.id, response__iexact='maybe').count()
yes_count = Rsvp.objects.filter(
meetup=self.id, response__iexact='yes').count()
no_count = Rsvp.objects.filter(
meetup=self.id, response__iexact='no').count()
result = {
'maybe': maybe_count,
'yes': yes_count,
'no': no_count
}
return result
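# Illustrative example (hypothetical data, not from the original source): for a meetup
# with two "yes" responses and one "maybe", meetup.rsvp_summary returns
# {'maybe': 1, 'yes': 2, 'no': 0}.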
class Rsvp(models.Model):
"""
Rsvp class to pick user response to a meetup
"""
id = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
created_on = models.DateTimeField(
auto_now_add=True)
updated_on = models.DateTimeField(
auto_now=True)
responder = models.ForeignKey(
User, on_delete=models.CASCADE)
meetup = models.ForeignKey(
Meetup, on_delete=models.CASCADE)
response = models.CharField(
max_length=5)
def __str__(self):
return self.response
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1.identity import v3
from keystoneauth1 import session
class Token(object):
def __init__(self, username, password, project_name,
auth_url, user_domain_name, project_domain_name):
self.username = username
self.password = password
self.auth_url = auth_url
self.project_name = project_name
self.user_domain_name = user_domain_name
self.project_domain_name = project_domain_name
def create_token(self):
auth = v3.Password(auth_url=self.auth_url,
username=self.username,
password=self.password,
project_name=self.project_name,
user_domain_name=self.user_domain_name,
project_domain_name=self.project_domain_name)
sess = session.Session(auth=auth)
token_id = sess.auth.get_token(sess)
return token_id
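# Illustrative usage (all values below are placeholders, not from the original source):
# token_id = Token(username="demo", password="secret", project_name="demo",
#                  auth_url="http://controller:5000/v3",
#                  user_domain_name="Default",
#                  project_domain_name="Default").create_token()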
|
from cppstats.main import run_cppstats
from cppstats.main import get_results
from cppstats.main import write_report
from cppstats.main import create_report
|
import glob
import os
import subprocess
from multiprocessing import cpu_count
from typing import List
import h5py
import numpy as np
import open3d as o3d
import tabulate
import tqdm
import trimesh
from joblib import Parallel, delayed
from scipy.interpolate import RegularGridInterpolator
def load_sdf(sdf_file, sdf_res):
intsize = 4
floatsize = 8
sdf = {
"param": [],
"value": []
}
with open(sdf_file, "rb") as f:
try:
bytes = f.read()
ress = np.frombuffer(bytes[:intsize * 3], dtype=np.int32)
if -1 * ress[0] != sdf_res or ress[1] != sdf_res or ress[2] != sdf_res:
raise Exception(sdf_file, "res not consistent with ", str(sdf_res))
positions = np.frombuffer(bytes[intsize * 3:intsize * 3 + floatsize * 6], dtype=np.float64)
# bottom left corner, x,y,z and top right corner, x, y, z
sdf["param"] = [positions[0], positions[1], positions[2], positions[3], positions[4], positions[5]]
sdf["param"] = np.float32(sdf["param"])
sdf["value"] = np.frombuffer(bytes[intsize * 3 + floatsize * 6:], dtype=np.float32)
sdf["value"] = np.reshape(sdf["value"], (sdf_res + 1, sdf_res + 1, sdf_res + 1))
finally:
f.close()
return sdf
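# The parsing above implies the following binary layout (stated as an assumption derived
# from load_sdf, not from any official format spec): three int32 values giving the grid
# resolution (the first one negated), six float64 values giving the bounding box
# (min x, y, z then max x, y, z), followed by (res + 1)^3 float32 distance values.
# A minimal sketch of a writer that produces a file load_sdf can read back:
def write_sdf_for_testing(sdf_file, sdf_res, bbox_min, bbox_max, values):
    with open(sdf_file, "wb") as f:
        # resolution header; load_sdf expects the first entry negated
        f.write(np.array([-sdf_res, sdf_res, sdf_res], dtype=np.int32).tobytes())
        # bounding box: min corner then max corner
        f.write(np.array(list(bbox_min) + list(bbox_max), dtype=np.float64).tobytes())
        # (res + 1)^3 float32 distance values
        f.write(values.astype(np.float32).reshape(-1).tobytes())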
def eval_sdf():
synthset = "02876657"
obj_id = "1ffd7113492d375593202bf99dddc268"
disn_prefix = "/home/matthias/Data2/datasets/shapenet/disn"
occnet_prefix = "/home/matthias/Data/Ubuntu/git/occupancy_networks/data/ShapeNet.build"
occnet_path = os.path.join(occnet_prefix, synthset, "4_watertight_scaled", obj_id + ".off")
disn_path = os.path.join(disn_prefix, synthset, obj_id, "isosurf.obj")
disn_sdf_path = os.path.join(disn_prefix, "SDF_v1", synthset, obj_id, "ori_sample.h5")
shapenet_path = os.path.join("/home/matthias/Data2/datasets/shapenet/ShapeNetCore.v1", synthset, obj_id,
"model.obj")
shapenet2_path = os.path.join("/home/matthias/Data2/datasets/shapenet/ShapeNetCore.v2", synthset, obj_id,
"models/model_normalized.obj")
sdfgen_path = os.path.join("/home/matthias/Data2/datasets/shapenet/sdfgen", synthset, obj_id, "output_0.hdf5")
sdf = True
pcd = False
mesh = False
sdfgen = False
if pcd:
with h5py.File(disn_sdf_path, "r") as f:
print("Keys: %s" % f.keys())
points = f["pc_sdf_sample"].value[:, :3]
sdf = f["pc_sdf_sample"].value[:, 3]
inside = points[sdf <= 0]
outside = points[sdf > 0]
pcd = trimesh.PointCloud(inside)
print(pcd.bounds)
pcd.show()
elif mesh:
file = "/home/matthias/Data/Ubuntu/git/DISN/02876657/1ffd7113492d375593202bf99dddc268/1ffd7113492d375593202bf99dddc268.obj"
mesh = o3d.io.read_triangle_mesh(file)
mesh.compute_triangle_normals()
mesh.compute_vertex_normals()
size = 0.5 * (mesh.get_max_bound() - mesh.get_min_bound()).max()
frame = o3d.geometry.TriangleMesh().create_coordinate_frame(size=size)
o3d.visualization.draw_geometries([mesh, frame])
# mesh = trimesh.load(file, process=False)
# print(mesh.bounds)
# mesh.show()
elif sdf:
# for file in glob.glob("/home/matthias/Data2/datasets/shapenet/matthias/02876657/**/*.dist"):
file = "/home/matthias/Data/Ubuntu/git/DISN/02876657/1ffd7113492d375593202bf99dddc268/1ffd7113492d375593202bf99dddc268.dist"
sdf = load_sdf(file, 256)
sample = False
if sample:
x = np.linspace(sdf["param"][0], sdf["param"][3], num=257)
y = np.linspace(sdf["param"][1], sdf["param"][4], num=257)
z = np.linspace(sdf["param"][2], sdf["param"][5], num=257)
my_interpolating_function = RegularGridInterpolator((z, y, x), sdf["value"])
num_points = 100000
samples = np.random.random(size=num_points * 3).reshape(num_points, 3)
samples = 2 * 0.49 * samples - 0.49
sdf = 256 * np.hstack([samples, np.expand_dims(my_interpolating_function(samples), axis=1)])
points = sdf[sdf[:, 3] <= 0][:, :3] + 128
else:
sdf = sdf["value"]
print(sdf.shape, sdf.max(), sdf.min())
points = np.argwhere(sdf <= 0)
print(points.shape, points.min(), points.max())
pcd = o3d.geometry.PointCloud()
# ((1 / 256) >= sdf) & (sdf >= -(1 / 256))
pcd.points = o3d.utility.Vector3dVector(points)
pcd.paint_uniform_color([0.3, 0.3, 0.5])
pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=30))
# grid = o3d.geometry.VoxelGrid().create_from_point_cloud(input=pcd, voxel_size=1)
size = 0.5 * (pcd.get_max_bound() - pcd.get_min_bound()).max()
points = [[0, 0, 0],
[0, 0, 256],
[0, 256, 256],
[256, 256, 256],
[256, 0, 0],
[256, 256, 0],
[0, 256, 0],
[256, 0, 256]]
lines = [[0, 1], [1, 2], [2, 3], [0, 3],
[4, 5], [5, 6], [6, 7], [4, 7],
[0, 4], [1, 5], [2, 6], [3, 7]]
colors = [[0, 0, 0] for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(points)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
frame = o3d.geometry.TriangleMesh().create_coordinate_frame(size=size)
o3d.visualization.draw_geometries([pcd, frame, line_set])
elif sdfgen:
with h5py.File(sdfgen_path, "r") as f:
grid = np.array(f["voxelgrid"][()])
truncation_threshold = 1.0
grid = (grid.astype(np.float64) / grid.max() - 0.5) * truncation_threshold
print(grid.shape, grid.max(), grid.min())
pcd = o3d.geometry.PointCloud()
points = np.argwhere(grid <= 0)
print(points.shape, points.min(), points.max())
pcd.points = o3d.utility.Vector3dVector(points)
pcd.paint_uniform_color([0.3, 0.3, 0.5])
pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=30))
size = 0.5 * (pcd.get_max_bound() - pcd.get_min_bound()).max()
box = pcd.get_axis_aligned_bounding_box()
points = [[0, 0, 0],
[0, 0, 256],
[0, 256, 256],
[256, 256, 256],
[256, 0, 0],
[256, 256, 0],
[0, 256, 0],
[256, 0, 256]]
lines = [[0, 1], [1, 2], [2, 3], [0, 3],
[4, 5], [5, 6], [6, 7], [4, 7],
[0, 4], [1, 5], [2, 6], [3, 7]]
colors = [[0, 0, 0] for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(points)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
frame = o3d.geometry.TriangleMesh().create_coordinate_frame(size=size)
o3d.visualization.draw_geometries([pcd, frame, line_set])
def eval_agile():
mesh_path = "/home/matthias/Data/Ubuntu/git/occupancy_networks/data/ShapeNet.build/02876657/2_watertight/1071fa4cddb2da2fc8724d5673a063a6.off"
point_path = "/home/matthias/Data/Ubuntu/git/occupancy_networks/data/ShapeNet/02876657/1071fa4cddb2da2fc8724d5673a063a6/pointcloud.npz"
data = np.load("/home/matthias/Data/Ubuntu/data/agile_justin/scene3/scene_data.npy", allow_pickle=True).item()
# for val in np.unique(data['voxel_model']):
# print(val, np.argwhere(data['voxel_model'] == val).shape)
# point_cloud = o3d.io.read_point_cloud("/home/matthias/Data/Ubuntu/data/agile_justin/scene3/scene_points.ply")
# point_cloud.points = o3d.utility.Vector3dVector(np.asarray(point_cloud.points) * data['voxel_size'])
pcd_3 = o3d.geometry.PointCloud()
pcd_3.points = o3d.utility.Vector3dVector(np.argwhere(data['voxel_model'] == 3))
pcd_3.paint_uniform_color([0.8, 0.0, 0.0])
pcd_3.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=30))
pcd_5 = o3d.geometry.PointCloud()
pcd_5.points = o3d.utility.Vector3dVector(np.argwhere(data['voxel_model'] == 5))
pcd_5.paint_uniform_color([0.0, 0.8, 0.0])
pcd_5.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=30))
pcd_6 = o3d.geometry.PointCloud()
pcd_6.points = o3d.utility.Vector3dVector(np.argwhere(data['voxel_model'] == 6))
pcd_6.paint_uniform_color([0.0, 0.0, 0.8])
pcd_6.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=30))
o3d.visualization.draw_geometries([pcd_3, pcd_5, pcd_6])
voxel_grid_5 = o3d.geometry.VoxelGrid().create_from_point_cloud(input=pcd_5, voxel_size=1)
voxel_grid_6 = o3d.geometry.VoxelGrid().create_from_point_cloud(input=pcd_6, voxel_size=1)
o3d.visualization.draw_geometries([voxel_grid_5, voxel_grid_6])
# start = time.time()
# for _ in range(10):
# trimesh.load(mesh_path).sample(100000)
# print(time.time() - start)
#
# start = time.time()
# for _ in range(10):
# o3d.io.read_triangle_mesh(filename=mesh_path).sample_points_uniformly(number_of_points=100000)
# print(time.time() - start)
#
# start = time.time()
# for _ in range(10):
# np.load(point_path)
# print(time.time() - start)
# mesh.compute_triangle_normals()
# mesh.compute_vertex_normals()
# o3d.visualization.draw_geometries([mesh])
# voxel_grid = o3d.geometry.VoxelGrid.create_from_triangle_mesh(mesh, voxel_size=1 / 32)
# o3d.visualization.draw_geometries([voxel_grid])
def convert(meshes: List[str], in_format: str = ".off", out_format: str = ".ply"):
def run(mesh):
command = f"meshlabserver -i {mesh} -o {mesh.replace(in_format, out_format)}"
subprocess.run(command.split(' '), stdout=subprocess.DEVNULL)
with Parallel(n_jobs=cpu_count()) as parallel:
parallel(delayed(run)(mesh) for mesh in tqdm.tqdm(meshes))
def edges_to_lineset(mesh, edges, color):
ls = o3d.geometry.LineSet()
ls.points = mesh.vertices
ls.lines = edges
colors = np.empty((np.asarray(edges).shape[0], 3))
colors[:] = color
ls.colors = o3d.utility.Vector3dVector(colors)
return ls
def check_properties(mesh: o3d.geometry.TriangleMesh, visualize: bool = False) -> List[bool]:
mesh.compute_vertex_normals()
edge_manifold = mesh.is_edge_manifold(allow_boundary_edges=True)
edge_manifold_boundary = mesh.is_edge_manifold(allow_boundary_edges=False)
vertex_manifold = mesh.is_vertex_manifold()
self_intersecting = mesh.is_self_intersecting()
# watertight = mesh.is_watertight()
watertight = False
orientable = mesh.is_orientable()
properties = [edge_manifold,
edge_manifold_boundary,
vertex_manifold,
self_intersecting,
watertight,
orientable]
keys = ["Edge Manifold",
"Edge Manifold Boundary",
"Vertex Manifold",
"Self Intersecting",
"Watertight",
"Orientable"]
data = dict(zip(keys, properties))
print(data)
# table = tabulate.tabulate(data, headers="keys")
# print(table)
if visualize:
geoms = [mesh]
if not edge_manifold:
edges = mesh.get_non_manifold_edges(allow_boundary_edges=True)
geoms.append(edges_to_lineset(mesh, edges, (1, 0, 0)))
if not edge_manifold_boundary:
edges = mesh.get_non_manifold_edges(allow_boundary_edges=False)
geoms.append(edges_to_lineset(mesh, edges, (0, 1, 0)))
if not vertex_manifold:
verts = np.asarray(mesh.get_non_manifold_vertices())
pcl = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(np.asarray(mesh.vertices)[verts]))
pcl.paint_uniform_color((0, 0, 1))
geoms.append(pcl)
if self_intersecting:
intersecting_triangles = np.asarray(mesh.get_self_intersecting_triangles())
intersecting_triangles = intersecting_triangles[0:1]
intersecting_triangles = np.unique(intersecting_triangles)
triangles = np.asarray(mesh.triangles)[intersecting_triangles]
edges = [np.vstack((triangles[:, i], triangles[:, j])) for i, j in [(0, 1), (1, 2), (2, 0)]]
edges = np.hstack(edges).T
edges = o3d.utility.Vector2iVector(edges)
geoms.append(edges_to_lineset(mesh, edges, (1, 0, 1)))
o3d.visualization.draw_geometries(geoms, mesh_show_back_face=True)
return properties
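# Illustrative usage (the file path is a placeholder, not from the original source):
# mesh = o3d.io.read_triangle_mesh("model.obj")
# edge_manifold, edge_manifold_boundary, vertex_manifold, self_intersecting, watertight, orientable = check_properties(mesh)
# Note that the watertight entry is currently hard-coded to False above (the actual check is commented out).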
def repair_mesh(mesh):
mesh = mesh.remove_degenerate_triangles()
mesh = mesh.remove_duplicated_triangles()
mesh = mesh.remove_duplicated_vertices()
mesh = mesh.remove_non_manifold_edges()
mesh.orient_triangles()
return mesh
def mesh_test():
synthset = "02876657"
shapenet_v1_path = f"/home/matthias/Data2/datasets/shapenet/ShapeNetCore.v1/{synthset}/**/model.obj"
shapenet_v2_path = f"/home/matthias/Data2/datasets/shapenet/ShapeNetCore.v2/{synthset}/**/models/model_normalized.obj"
occnet_path = f"/home/matthias/Data/Ubuntu/git/occupancy_networks/data/ShapeNet.build/{synthset}/2_watertight/*.off"
disn_path = f"/home/matthias/Data2/datasets/shapenet/disn/{synthset}/**/isosurf.obj"
my_disn_path = f"/home/matthias/Data2/datasets/shapenet/matthias/disn/{synthset}/**/*.obj"
my_disn_from_occnet_path = f"/home/matthias/Data2/datasets/shapenet/matthias/normalized/*.obj"
manifoldplus_path = f"/home/matthias/Data2/datasets/shapenet/matthias/manifold/{synthset}/*.obj"
meshes = sorted(glob.glob(disn_path))
results = list()
for mesh in tqdm.tqdm(meshes):
# mesh_off = mesh.replace('obj', 'off')
mesh = trimesh.load(mesh, force="mesh", process=False)
if not mesh.is_watertight:
o3d_mesh = o3d.geometry.TriangleMesh()
o3d_mesh.vertices = o3d.utility.Vector3dVector(mesh.vertices)
o3d_mesh.triangles = o3d.utility.Vector3iVector(mesh.faces)
check_properties(o3d_mesh, visualize=True)
results.append(mesh.is_watertight)
# results.append(o3d.io.read_triangle_mesh(mesh, enable_post_processing=True).is_watertight())
# mesh = o3d.io.read_triangle_mesh(mesh)
# mesh = mesh.simplify_vertex_clustering(voxel_size=0.01)
# mesh.compute_triangle_normals()
# mesh.compute_vertex_normals()
# o3d.visualization.draw_geometries([mesh])
# results.append(check_properties(mesh)[-2])
# os.remove(mesh_off)
print(len(results), np.sum(results), np.sum(results) / len(results))
# Results synthset 02876657:
# ShapeNet v1: 498 11 0.02208835341365462
# ShapeNet v2: 498 13 0.02610441767068273
# OccNet: 498 463 0.929718875502008
# DISN: 498 293 0.5883534136546185
# My DISN: 498 401 0.8052208835341366
# My DISN from OccNet: 498 175 0.3514056224899598
if __name__ == "__main__":
meshes = glob.glob("/home/matthias/Data/Ubuntu/git/occupancy_networks/data/ShapeNet.build/02876657/4_watertight_scaled/*.off")
convert(meshes)
|
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import logging
import tensorflow as tf
log = logging.getLogger(__name__)
def main():
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(train_images.shape)
print(len(train_labels))
print(train_labels)
print(test_images.shape)
print(len(test_labels))
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
print(predictions[0])
print(np.argmax(predictions[0]))
print(tf.__version__)
if __name__ == "__main__":
main()
|
from hendrix.experience import crosstown_traffic
from hendrix.mechanics.concurrency.decorators import _ThroughToYou
def crosstownTaskListDecoratorFactory(list_to_populate):
class TaskListThroughToYou(_ThroughToYou):
def __init__(self, *args, **kwargs):
self.crosstown_task_list = list_to_populate
super(TaskListThroughToYou, self).__init__(*args, **kwargs)
def responseless_fallback(self, crosstown_task):
self.crosstown_task_list.append(crosstown_task)
return TaskListThroughToYou
class AsyncTestMixin(object):
def setUp(self):
self.sub_setUp()
return super(AsyncTestMixin, self).setUp()
def sub_setUp(self):
self.recorded_tasks = []
crosstown_traffic.decorator = crosstownTaskListDecoratorFactory(self.recorded_tasks)
crosstown_traffic()
self.archived_tasks = []
def next_task(self):
for task in self.recorded_tasks:
if task not in self.archived_tasks:
self.archived_tasks.append(task)
return task
raise StopIteration("No more tasks.")
def assertNumCrosstownTasks(self, num_tasks):
if not num_tasks == len(self.recorded_tasks):
raise AssertionError(
"There were not %s recorded tasks. The recorded tasks were: %s" % (num_tasks, self.recorded_tasks))
|
""" Generate model spectra, add model attribute """
def draw_spectra(model, dataset):
""" Generate best-fit spectra for all the test objects
Parameters
----------
model: model
The Cannon spectral model
dataset: Dataset
Dataset object
Returns
-------
best_fluxes: ndarray
The best-fit test fluxes
best_ivars:
The best-fit test inverse variances
"""
coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = model.model
nstars = len(dataset.test_SNR)
cannon_flux = np.zeros(dataset.test_flux.shape)
cannon_ivar = np.zeros(dataset.test_ivar.shape)
for i in range(nstars):
x = label_vector[:,i,:]
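# per-pixel dot product of this star's label vector with the model coefficients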
spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
cannon_flux[i,:] = spec_fit
bad = dataset.test_ivar[i,:] == SMALL**2
cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2
return cannon_flux, cannon_ivar
def overlay_spectra(model, dataset):
""" Run a series of diagnostics on the fitted spectra
Parameters
----------
model: model
best-fit Cannon spectral model
dataset: Dataset
original spectra
"""
best_flux, best_ivar = draw_spectra(model, dataset)
coeffs_all, covs, scatters, all_chisqs, pivots, label_vector = model.model
# Overplot original spectra with best-fit spectra
print("Overplotting spectra for ten random stars")
res = dataset.test_flux-best_flux
lambdas = dataset.wl
npix = len(lambdas)
nstars = best_flux.shape[0]
pickstars = []
for i in range(10):
pickstars.append(random.randrange(0, nstars-1))
for i in pickstars:
print("Star %s" % i)
ID = dataset.test_ID[i]
spec_orig = dataset.test_flux[i,:]
bad = dataset.test_flux[i,:] == 0
lambdas = np.ma.array(lambdas, mask=bad, dtype=float)
npix = len(lambdas.compressed())
spec_orig = np.ma.array(dataset.test_flux[i,:], mask=bad)
spec_fit = np.ma.array(best_flux[i,:], mask=bad)
ivars_orig = np.ma.array(dataset.test_ivar[i,:], mask=bad)
ivars_fit = np.ma.array(best_ivar[i,:], mask=bad)
red_chisq = np.sum(all_chisqs[:,i], axis=0) / (npix - coeffs_all.shape[1])
red_chisq = np.round(red_chisq, 2)
fig,axarr = plt.subplots(2)
ax1 = axarr[0]
im = ax1.scatter(lambdas, spec_orig, label="Orig Spec",
c=1 / np.sqrt(ivars_orig), s=10)
ax1.scatter(lambdas, spec_fit, label="Cannon Spec", c='r', s=10)
ax1.errorbar(lambdas, spec_fit,
yerr=1/np.sqrt(ivars_fit), fmt='ro', ms=1, alpha=0.7)
ax1.set_xlabel(r"Wavelength $\lambda (\AA)$")
ax1.set_ylabel("Normalized flux")
ax1.set_title("Spectrum Fit: %s" % ID)
ax1.set_title("Spectrum Fit")
ax1.set_xlim(min(lambdas.compressed())-10, max(lambdas.compressed())+10)
ax1.legend(loc='lower center', fancybox=True, shadow=True)
ax2 = axarr[1]
ax2.scatter(spec_orig, spec_fit, c=1/np.sqrt(ivars_orig), alpha=0.7)
ax2.errorbar(spec_orig, spec_fit, yerr=1 / np.sqrt(ivars_fit),
ecolor='k', fmt="none", ms=1, alpha=0.7)
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# colorbar needs a mappable; use the scatter from the top panel
fig.colorbar(im, ax=ax2, label="Uncertainties on the Fluxes from the Original Spectrum")
#fig.colorbar(
# im, cax=cbar_ax,
# label="Uncertainties on the Fluxes from the Original Spectrum")
xlims = ax2.get_xlim()
ylims = ax2.get_ylim()
lims = [np.min([xlims, ylims]), np.max([xlims, ylims])]
ax2.plot(lims, lims, 'k-', alpha=0.75)
textstr = "Red Chi Sq: %s" % red_chisq
props = dict(boxstyle='round', facecolor='palevioletred', alpha=0.5)
ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax2.set_xlim(xlims)
ax2.set_ylim(ylims)
ax2.set_xlabel("Orig Fluxes")
ax2.set_ylabel("Fitted Fluxes")
plt.tight_layout()
filename = "best_fit_spec_Star%s.png" % i
print("Saved as %s" % filename)
fig.savefig(filename)
plt.close(fig)
def residuals(cannon_set, dataset):
""" Stack spectrum fit residuals, sort by each label. Include histogram of
the RMS at each pixel.
Parameters
----------
cannon_set: Dataset
best-fit Cannon spectra
dataset: Dataset
original spectra
"""
print("Stacking spectrum fit residuals")
res = dataset.test_fluxes - cannon_set.test_fluxes
bad = dataset.test_ivars == SMALL**2
    err = np.sqrt(1. / dataset.test_ivars + 1. / cannon_set.test_ivars)
res_norm = res / err
res_norm = np.ma.array(res_norm,
mask=(np.ones_like(res_norm) *
(np.std(res_norm,axis=0) == 0)))
res_norm = np.ma.compress_cols(res_norm)
for i in range(len(cannon_set.get_plotting_labels())):
label_name = cannon_set.get_plotting_labels()[i]
print("Plotting residuals sorted by %s" % label_name)
label_vals = cannon_set.tr_label_vals[:,i]
sorted_res = res_norm[np.argsort(label_vals)]
mu = np.mean(sorted_res.flatten())
sigma = np.std(sorted_res.flatten())
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.1
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.1, height]
plt.figure()
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
im = axScatter.imshow(sorted_res, cmap=plt.cm.bwr_r,
interpolation="nearest", vmin=mu - 3. * sigma,
vmax=mu + 3. * sigma, aspect='auto',
origin='lower', extent=[0, len(dataset.wl),
min(label_vals),
max(label_vals)])
cax, kw = colorbar.make_axes(axScatter.axes, location='bottom')
plt.colorbar(im, cax=cax, orientation='horizontal')
axScatter.set_title(
r"Spectral Residuals Sorted by ${0:s}$".format(label_name))
axScatter.set_xlabel("Pixels")
axScatter.set_ylabel(r"$%s$" % label_name)
axHisty.hist(np.std(res_norm,axis=1)[~np.isnan(np.std(res_norm, axis=1))], orientation='horizontal', range=[0,2])
axHisty.axhline(y=1, c='k', linewidth=3, label="y=1")
axHisty.legend(bbox_to_anchor=(0., 0.8, 1., .102),
prop={'family':'serif', 'size':'small'})
axHisty.text(1.0, 0.5, "Distribution of Stdev of Star Residuals",
verticalalignment='center', transform=axHisty.transAxes,
rotation=270)
axHisty.set_ylabel("Standard Deviation")
start, end = axHisty.get_xlim()
axHisty.xaxis.set_ticks(np.linspace(start, end, 3))
axHisty.set_xlabel("Number of Stars")
axHisty.xaxis.set_label_position("top")
axHistx.hist(np.std(res_norm, axis=0)[~np.isnan(np.std(res_norm, axis=0))], range=[0.8,1.1])
axHistx.axvline(x=1, c='k', linewidth=3, label="x=1")
axHistx.set_title("Distribution of Stdev of Pixel Residuals")
axHistx.set_xlabel("Standard Deviation")
axHistx.set_ylabel("Number of Pixels")
start, end = axHistx.get_ylim()
axHistx.yaxis.set_ticks(np.linspace(start, end, 3))
axHistx.legend()
filename = "residuals_sorted_by_label_%s.png" % i
plt.savefig(filename)
print("File saved as %s" % filename)
plt.close()
# Auto-correlation of mean residuals
print("Plotting Auto-Correlation of Mean Residuals")
mean_res = res_norm.mean(axis=0)
autocorr = np.correlate(mean_res, mean_res, mode="full")
pkwidth = int(len(autocorr)/2-np.argmin(autocorr))
xmin = int(len(autocorr)/2)-pkwidth
xmax = int(len(autocorr)/2)+pkwidth
zoom_x = np.linspace(xmin, xmax, len(autocorr[xmin:xmax]))
fig, axarr = plt.subplots(2)
axarr[0].plot(autocorr)
axarr[0].set_title("Autocorrelation of Mean Spectral Residual")
axarr[0].set_xlabel("Lag (# Pixels)")
axarr[0].set_ylabel("Autocorrelation")
axarr[1].plot(zoom_x, autocorr[xmin:xmax])
axarr[1].set_title("Central Peak, Zoomed")
axarr[1].set_xlabel("Lag (# Pixels)")
axarr[1].set_ylabel("Autocorrelation")
filename = "residuals_autocorr.png"
plt.savefig(filename)
print("saved %s" % filename)
plt.close()
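# Example usage (sketch): `md` is assumed to be a trained Cannon model object
# exposing a `.model` tuple, while `ds` and `cannon_set` are Dataset objects
# holding the observed and best-fit test spectra respectively.
#   flux, ivar = draw_spectra(md, ds)
#   overlay_spectra(md, ds)
#   residuals(cannon_set, ds)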
|
__author__ = 'Rolf Jagerman'
import roslib; roslib.load_manifest('aidu_gui')
from PySide import QtGui, QtCore
from PySide.QtGui import QApplication
from time import sleep
from window import Window
from ros_thread import ROSThread
class Manager:
"""
The manager for the application GUI. This internally handles all the other GUI elements that are necessary to
display the system. It also handles shutdown events and appropriately shuts down the ROS thread whenever the user
wants to exit the application.
"""
def __init__(self):
pass
@staticmethod
def setup():
Manager.app = QApplication([])
Manager.app.aboutToQuit.connect(Manager.exit)
font = Manager.app.font()
font.setPointSize(18)
Manager.app.setFont(font)
Manager.window = Window()
Manager.window.showFullScreen()
Manager.window.activateWindow()
Manager.ros_thread = ROSThread(Manager.app)
Manager.ros_thread.start()
QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_Q), Manager.window,
Manager.window.close)
@staticmethod
def execute():
"""
Starts execution of the GUI. Returns the application's exit code when it is shut down.
"""
return Manager.app.exec_()
@staticmethod
def exit():
"""
Callback function for when the user exits the application.
This will attempt to stop the ROS thread and will wait before shutting the GUI down.
"""
Manager.ros_thread.stop()
while not Manager.ros_thread.done:
try:
sleep(0.1)
except KeyboardInterrupt:
break
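# Typical usage (sketch): an application entry point is expected to call
#   Manager.setup()
#   exit_code = Manager.execute()
# The Qt aboutToQuit hook wired in setup() then takes care of Manager.exit().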
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
import unittest
#from test import test_diff
if __name__ == '__main__':
SeTestSuite = unittest.defaultTestLoader.discover(start_dir='./')
unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(SeTestSuite)) |
from django.contrib import admin
from megasena.models import Draw
admin.site.register(Draw)
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import argparse
from types import SimpleNamespace
from urllib.parse import urlparse
from gitlab import Gitlab, GitlabAuthenticationError, GitlabGetError
def last_good_pipeline(client, project_name, branch):
pipelines = client.projects.get(project_name).pipelines.list(as_list=False)
# Pipelines are returned in reverse chronological order.
return next(p for p in pipelines
if p.status == 'success' and p.ref == branch)
def args_from_cli():
parser = argparse.ArgumentParser(
        description='Print the Git hash of the last commit to a project '
                    'on a given branch for which a pipeline was successful.',
)
parser.add_argument('project', help='fully qualified project name')
    parser.add_argument('branch', help='branch name')
parser.add_argument('--token', help='Gitlab private token for API access.')
parser.add_argument('--gitlab', default='https://gitlab.com')
return parser.parse_args()
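# Example invocation outside CI (sketch; the script name and the project and
# branch values are illustrative only):
#   python last_good_pipeline.py mygroup/myproject master --token <private-token>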
def args_from_env():
env = os.environ
    # splitting on '@' is necessary because CI specifies repository URL
# as 'gitlab-ci-token:xxxx@<gitlab host>'
*_, gitlab_host = urlparse(env['CI_REPOSITORY_URL']).netloc.split('@')
gitlab_url = 'https://' + gitlab_host
return SimpleNamespace(
project=env['CI_PROJECT_PATH'],
branch=env['CI_COMMIT_REF_NAME'],
token=env.get('GITLAB_API_TOKEN'),
gitlab=gitlab_url,
)
def am_ci_job():
return 'GITLAB_CI' in os.environ
def main():
args = args_from_env() if am_ci_job() else args_from_cli()
client = Gitlab(args.gitlab, args.token, api_version=4)
try:
if args.token:
client.auth()
print(last_good_pipeline(client, args.project, args.branch).sha)
except GitlabAuthenticationError:
print('Authentication error: did you provide a valid token?',
file=sys.stderr)
exit(1)
except GitlabGetError as err:
response = json.loads(err.response_body.decode())
print('Problem contacting Gitlab: {}'
.format(response['message']),
file=sys.stderr)
exit(1)
except StopIteration:
print('No successful pipelines for branch {}'.format(args.branch),
file=sys.stderr)
exit(1)
if __name__ == '__main__':
main()
|
# Generated by Django 3.1.4 on 2020-12-07 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CustomerApps', '0003_auto_20201207_1922'),
]
operations = [
migrations.RemoveField(
model_name='customerapp',
name='paid_status',
),
migrations.AddField(
model_name='customerapp',
name='status',
field=models.CharField(default='Test', max_length=20),
),
migrations.AlterField(
model_name='customerapp',
name='token',
field=models.CharField(max_length=10, unique=True),
),
]
|
import pandas as pd
import numpy as np
import os
import json
from datetime import datetime
# Load config.json and get input and output paths
with open('config.json', 'r') as f:
config = json.load(f)
input_folder_path = os.path.join(os.getcwd(), config['input_folder_path'])
output_folder_path = os.path.join(os.getcwd(), config['output_folder_path'])
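# Expected config.json shape (sketch; only the two keys read above are assumed,
# the folder names are illustrative):
# {
#     "input_folder_path": "sourcedata",
#     "output_folder_path": "ingesteddata"
# }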
# Function for data ingestion
def merge_multiple_dataframe():
# check for datasets, compile them together, and write to an output file
df_list = pd.DataFrame(columns=["corporation", "lastmonth_activity", "lastyear_activity",
"number_of_employees", "exited"])
filenames = os.listdir(input_folder_path)
for each_filename in filenames:
_df = pd.read_csv(os.path.join(input_folder_path, each_filename))
        df_list = pd.concat([df_list, _df])
result = df_list.drop_duplicates()
    if not os.path.exists(output_folder_path):
        os.mkdir(output_folder_path)
    result.to_csv(os.path.join(output_folder_path, "finaldata.csv"), index=False)
# Function for ingestion record keeping
def output_ingestion_record(source_location, output_location):
filenames = os.listdir(source_location)
with open(os.path.join(output_location, "ingestedfiles.txt"), "w") as report_file:
for file in filenames:
report_file.write(file + "\n")
if __name__ == '__main__':
merge_multiple_dataframe()
output_ingestion_record(source_location=input_folder_path,
output_location=output_folder_path)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division, print_function
import unittest
from adsmutils import ADSFlask
class TestUpdateRecords(unittest.TestCase):
def test_config(self):
app = ADSFlask(u'test', local_config={
u'FOO': [u'bar', {}],
u'SQLALCHEMY_DATABASE_URI': u'sqlite:///',
})
self.assertEqual(app._config[u'FOO'], [u'bar', {}])
self.assertEqual(app.config[u'FOO'], [u'bar', {}])
self.assertTrue(app.db)
if __name__ == '__main__':
unittest.main()
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DescribeUrlBlockRegulationResp(object):
def __init__(self, id=None, userPin=None, regulationName=None, blockUrl=None, zoneId=None, filterId=None, firewallRuleId=None, opType=None, enableRegulation=None, createTime=None, createUser=None, updateTime=None, updateUser=None):
"""
        :param id: (Optional) ID of the blocked-URL rule
        :param userPin: (Optional) user PIN associated with the URL
        :param regulationName: (Optional) name of the blocked-URL rule
        :param blockUrl: (Optional) URL being blocked
        :param zoneId: (Optional) zone id
        :param filterId: (Optional) filter id
        :param firewallRuleId: (Optional) firewall rule id
        :param opType: (Optional) operation type (0 -> block)
        :param enableRegulation: (Optional) rule state (0 -> disabled, 1 -> enabled)
        :param createTime: (Optional) rule creation time
        :param createUser: (Optional) rule creator
        :param updateTime: (Optional) rule update time
        :param updateUser: (Optional) rule updater
"""
self.id = id
self.userPin = userPin
self.regulationName = regulationName
self.blockUrl = blockUrl
self.zoneId = zoneId
self.filterId = filterId
self.firewallRuleId = firewallRuleId
self.opType = opType
self.enableRegulation = enableRegulation
self.createTime = createTime
self.createUser = createUser
self.updateTime = updateTime
self.updateUser = updateUser
|
from collections import OrderedDict, namedtuple
from itertools import count
from typing import Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import numpy.random as nr
from numba import njit
import pandas as pd
from pandas import DataFrame
Prob = float
def as_array(s: Union[np.ndarray, pd.Series]):
try:
return s.values
except AttributeError:
return s
def unload(dct: Dict, ks: Union[List[str], str]) -> List[float]:
if isinstance(ks, str):
ks = ks.split()
return [dct[k] for k in ks]
###############
# Simulations #
###############
def gen_probs(abgd: List[float], n=10):
pa, pb, ta, tb = abgd
p = nr.beta(pa, pb, size=n)
th = nr.beta(ta, tb, size=n)
return p, th
def gen_buy_die(
n_opps,
n_users,
abgd: List[float],
p_th: Optional[Tuple[Sequence[Prob], Sequence[Prob]]] = None,
n_opps_name="n_opps",
seed=0,
):
"""
Given `n_opps` window size of opportunity, simulate Buy 'Til You Die process
for `n_users` users.
If arrays for latent variables p_th = (p, theta) are not passed, then these
are drawn from beta distribution.
"""
nr.seed(seed)
if p_th is None:
p, th = gen_probs(abgd, n=n_users)
else:
p, th = p_th
    txs, xs = np.empty_like(p, dtype=np.int64), np.empty_like(p, dtype=np.int64)
bern = lambda p: nr.binomial(1, p, size=n_opps)
for i, pi, thi in zip(count(), p, th):
buys, dies = bern(pi), bern(thi)
xs[i], txs[i] = get_x_tx(buys, dies)
ret = DataFrame(
OrderedDict([("p", p), ("th", th), ("frequency", xs), ("recency", txs)])
).assign(**{n_opps_name: n_opps})
return ret
@njit
def get_x_tx(buys, dies) -> Tuple[int, int]:
"""
Converts simulated 'buy'/'die' events into frequency/recency
counts.
"""
x = 0
tx = 0
for opp, buy, die in zip(range(1, len(buys) + 1), buys, dies):
if die:
break
if buy:
x += 1
tx = opp
return x, tx
_AbgdParams = namedtuple("AbgdParams", ["a", "b", "g", "d"])
class AbgdParams(_AbgdParams):
def mod_param(self, **par_fns):
dct = self._asdict().copy()
for par_letter, f in par_fns.items():
dct[par_letter] = f(dct[par_letter])
return self.from_dct(dct)
@property
def _greek_dct_unicode(self):
dct = self._asdict().copy()
for name, letter in zip(["α", "β", "γ", "δ"], "abgd"):
dct[name] = dct.pop(letter)
return dct
@classmethod
def from_dct(cls, dct):
return cls(*[dct[k] for k in "abgd"])
@classmethod
def from_greek_dct(cls, dct):
return cls(*[dct[k] for k in ("alpha", "beta", "gamma", "delta")])
def __repr__(self):
st_repr = ", ".join(
"{}: {:.1f}".format(k, v) for k, v in self._greek_dct_unicode.items()
)
return "BGBB Hyperparams <{}>".format(st_repr)
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.post_list, name='home'),
path('post/<int:pk>/', views.post_detail, name='post-detail'),
path('post/create/', views.post_create, name='post-create'),
path('post/<int:pk>/update/', views.post_update, name='post-update'),
path('post/<int:pk>/delete/', views.post_delete, name='post-delete'),
]
|
#!/usr/bin/env python3
from setuptools import setup, Command as _Command
from pathlib import Path
from shutil import rmtree
import os
import sys
HERE = Path(__file__).absolute().parent
sys.path.insert(0, str(HERE / 'src'))
import httpx_html # noqa: E402
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
def print_bold(string):
'''Prints things in bold.
'''
print(f'\033[1m{string}\033[0m', flush=True)
class Command(_Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class UploadCommand(Command):
description = 'Build and publish the package.'
def run(self):
try:
print_bold('Removing previous builds…')
rmtree(os.path.join(str(HERE), 'dist'))
except OSError:
pass
print_bold('Building Source and Wheel distribution…')
os.system(f'{sys.executable} setup.py sdist bdist_wheel')
print_bold('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
print_bold('Publishing git tags…')
os.system(f'git tag v{httpx_html.__version__}')
os.system('git push --tags')
sys.exit()
class MakeDocsCommand(Command):
description = 'Make documentation.'
def run(self):
print_bold('Making documentation...')
os.chdir(str(HERE / 'docs'))
os.system('make html')
# print_bold('Staging changes...')
# os.chdir(str(HERE / 'docs/build/html'))
# os.system('git add --all')
# os.system('git commit -m "docs: updates"')
# print_bold('Publishing GH Pages')
# os.system('git push origin gh-pages')
sys.exit()
if __name__ == '__main__':
    # Register the custom commands defined above so they can be run via
    # `python setup.py upload` / `python setup.py docs` (command names assumed).
    setup(cmdclass={'upload': UploadCommand, 'docs': MakeDocsCommand})
|
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
import time
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
flags.DEFINE_string('name', os.getenv('MINECRAFT_SERVER_NAME'), 'Server name.')
flags.DEFINE_integer('port', os.getenv(
'MINECRAFT_SERVER_PORT'), 'Server port.')
flags.DEFINE_float('interval_seconds', 1.0, 'Advertise interval in seconds.')
def main(argv):
del argv # Unused.
BROADCAST_IP = '224.0.2.60'
BROADCAST_PORT = 4445
if not FLAGS.name:
logging.fatal(
'Missing required --name or MINECRAFT_SERVER_NAME environment variable.')
if not FLAGS.port:
logging.fatal(
'Missing required flag --port or MINECRAFT_SERVER_PORT environment variable.')
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
logging.info('Advertising Minecraft server "%s" on port %d.', FLAGS.name,
FLAGS.port)
logging.info('Advertise interval: %f seconds', FLAGS.interval_seconds)
message = str.encode('[MOTD]%s[/MOTD][AD]%d[/AD]' %
(FLAGS.name, FLAGS.port))
while True:
try:
sock.sendto(message, (BROADCAST_IP, BROADCAST_PORT))
time.sleep(FLAGS.interval_seconds)
except:
logging.info('Shutting down.')
break
if __name__ == '__main__':
app.run(main)
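# Example run (sketch; the script name is illustrative, 25565 is the default
# Minecraft server port):
#   MINECRAFT_SERVER_NAME="My Server" MINECRAFT_SERVER_PORT=25565 python advertise.py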
|
import argparse
from testing.TestGenerator import TestGenerator
from testing.agents.AgentTestGenerator import AgentTestGenerator
from constants import NUM_CHUNKS
from featnames import VALIDATION
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--byr', action='store_true')
parser.add_argument('--slr', action='store_true')
parser.add_argument('--num', type=int, default=1,
choices=range(1, NUM_CHUNKS+1))
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
if args.byr or args.slr:
assert not (args.slr and args.byr)
gen = AgentTestGenerator(verbose=args.verbose,
byr=args.byr,
slr=args.slr)
else:
gen = TestGenerator(verbose=args.verbose)
gen.process_chunk(part=VALIDATION, chunk=args.num-1)
if __name__ == '__main__':
main()
|
from datetime import datetime
from elasticsearch_dsl import DocType, String, Date, Integer, Float
from elasticsearch_dsl.connections import connections
# Define a default Elasticsearch client
connections.create_connection(hosts=['192.168.1.122:9876'])
WEEKDAYS = ('Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun')
class SensorLog(DocType):
sensor_name = String(index='not_analyzed')
temperature = Float()
moisture = Integer()
light = Integer()
timestamp = Date()
# timeparts for fast queries and aggs later, calculated during save
day_of_week = String(index='not_analyzed') # Mon-Fri
day_of_year = Integer() # 1-366
day_of_month = Integer() # 1-31
week_of_year = Integer() # 1-53
month = Integer() # 1-12 === Jan-Dec
year = Integer()
hour = Integer()
minute = Integer()
class Meta:
index = 'sensor_log'
def save(self, ** kwargs):
self.day_of_week = WEEKDAYS[self.timestamp.weekday()]
self.day_of_year = self.timestamp.timetuple().tm_yday
self.day_of_month = self.timestamp.day
self.week_of_year = self.timestamp.isocalendar()[1]
self.month = self.timestamp.month
self.year = self.timestamp.year
self.hour = self.timestamp.hour
self.minute = self.timestamp.minute
return super(SensorLog, self).save(** kwargs)
def is_published(self):
return datetime.now() < self.published_from
# create the mappings in elasticsearch
def create_index():
SensorLog.init()
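# Example usage (sketch; host configured above, field values are illustrative):
#   create_index()
#   SensorLog(sensor_name='greenhouse-1', temperature=21.5, moisture=430,
#             light=812, timestamp=datetime.now()).save()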
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.INFO)
from notepad import main
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_demo.items import POIItem
from scrapy.selector import Selector
class PoiSpider(scrapy.Spider):
name = "poi"
allowed_domains = ["poi86.com"]
start_urls = ['http://www.poi86.com/poi/district/1289/1.html']
def parse(self, response):
i = 0
for poi_tr in response.css("table.table>tr"):
poi = POIItem()
poi['name'] = poi_tr.xpath('td[1]/a/text()').extract()
poi['path'] = poi_tr.xpath('td[1]/a/@href').extract()
poi['address'] = poi_tr.xpath('td[2]/text()').extract()
yield poi
i=i+1
if i > 2:
break
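# Run this spider from the Scrapy project root with (sketch; the output file
# name is illustrative):
#   scrapy crawl poi -o poi.json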
|
import numpy as np
from utils.environ import env
from utils.helpers import *
from utils.db import SqclDB
from utils.mlogging import mlogging
from utils.exception import ExceptiontContainer
from utils.batch import iter_block_batches, iter_merged_block_batches
from models.dssm import DSSM, DSSMConfig
import os
from math import ceil
import json
def train_expt(model_id, expt_mode, model_config, note=None, to_log=True, **expt_params):
if to_log:
log_expt_params('dssm', expt_mode, model_id, {**model_config.asdict(), **expt_params}, note, line_max_len=120)
train(model_id=model_id, model_config=model_config, to_log=to_log, **expt_params)
def train(model_id, load_model, model_config, bn_training, log_tfvars, log_freqs, max_step,
block_repo_name, epochs, val_ratio, sample_weight_type=None, batch_num=None, block_seed=18035, to_log=True):
logger = mlogging.get_logger(('dssm/%s.train.log'%model_id if to_log else None), prefix='DSSM')
logger.info('model id: %s (train)'%model_id)
model = DSSM(model_id, load_model, model_config, log_tfvars, logger)
block_repo_path = relpath('batch/%s'%block_repo_name, env('SQCL_DIR'))
np.random.seed(block_seed)
    if os.path.exists(block_repo_path+'/sub_repo.0'): # check whether the repo contains sub_repos
block_seqs = np.random.permutation(len(os.listdir(block_repo_path+'/sub_repo.0')))
else:
block_seqs = np.random.permutation(len(os.listdir(block_repo_path)))
    val_block_num = ceil(len(block_seqs) * val_ratio) # split the block ranges used for train and val
if batch_num is not None:
val_batch_num = round(batch_num * val_ratio)
train_batch_num = batch_num - val_batch_num
assert train_batch_num>=1 and val_batch_num>=1, 'val_ratio or batch_num is too small'
else:
val_batch_num, train_batch_num = None, None
batch_gen = lambda sub_repo_seq=None: iter_block_batches(block_repo_path, 'train', sub_repo_seq=sub_repo_seq, block_range=block_seqs[val_block_num:],
get_adj=False, batch_limit=train_batch_num, sample_weight_type=sample_weight_type)
val_batch_gen = lambda sub_repo_seq=None: iter_block_batches(block_repo_path, 'train', sub_repo_seq=sub_repo_seq, block_range=block_seqs[:val_block_num],
get_adj=False, batch_limit=val_batch_num, sample_weight_type=sample_weight_type)
model.train(batch_gen, val_batch_gen, epochs, bn_training, log_freqs, max_step, report_upon_oom=True)
def search_expt(model_id, block_repo_name, rel_calc_freq, top_k, to_log=False):
logger = mlogging.get_logger(('dssm/%s.search.log'%model_id if to_log else None), prefix='DSSM')
logger.info('model id: %s (search)'%model_id)
model = DSSM(model_id, load_model=True, logger=logger)
block_repo_path_proto = relpath('batch/%s/%%s'%block_repo_name, env('SQCL_DIR'))
test_queries_batch_gen = lambda: iter_merged_block_batches(block_repo_path_proto%'test_queries', 'search')
docs_batch_gen = lambda: iter_merged_block_batches(block_repo_path_proto%'docs', 'search')
logger.info('Loading records.')
rec_ids = []
for rec_type in ['test_query', 'doc']:
with open(block_repo_path_proto%(rec_type+'_ids.json')) as f:
rec_ids.append(json.load(f))
test_query_ids, doc_ids = rec_ids
logger.info('Records loaded.')
search_ranks = model.search(test_queries_batch_gen, docs_batch_gen, test_query_ids, doc_ids, rel_calc_freq, top_k)
output_path = relpath('search_ranks/dssm/%s.top_%s.json'%(model_id, top_k), env('RESULT_DIR'))
with open(output_path, 'w') as f:
json.dump(search_ranks, f)
logger.info('Complete!')
if __name__ == '__main__':
args = parse_args({
'--train': {
'flags': ['-t'],
'opts': {'action':'store_true'},
},
'--search': {
'flags': ['-s'],
'opts': {'action':'store_true'},
},
'--model_id': {
'flags': ['-m'],
'opts': {
'help': 'Id of model used, required while searching.',
}
},
'--hibernate': {
# 'flags': ['-h'],
'opts': {'action':'store_true'},
},
})
if args.train:
load_model = False
if not load_model:
model_id = 'dssm.%s'%get_timestamp()
else:
existed_model_id = ''
model_id = existed_model_id
note = ''
to_log, log_tfvars = True, True
sample_weight_type = None # 'tacm'
model_config = DSSMConfig(**{
'input_size': 68894,
'loss_type': 'dssm_loss', # 'attention_rank',
'gamma': 32,
'irrel_num': 8,
'learning_rate': 1e-5,
'hid_units': [512, 256, 128],
'batch_norm': False,
'weight_decay': 0,
'use_sample_weights': (sample_weight_type is not None),
'seed': 18035,
})
# train params
bn_training = False
block_repo_name = '[block_repo_name]'
epochs = 9
val_ratio = 0.02
log_freqs = (300, 2400, 2000, 300, 300, 168000000)
# train, validation, val_info, summary, init_summary, reg_ckpt
max_step = 100000
batch_num = 1000 * 30
block_seed = 18035
with ExceptiontContainer(log_prefix='DSSM', hibernate=True, use_console=True, beep=True):
train_expt(model_id, 'train', model_config, note, to_log=to_log,
load_model=load_model, bn_training=bn_training, log_tfvars=log_tfvars, log_freqs=log_freqs, max_step=max_step,
block_repo_name=block_repo_name, epochs=epochs, val_ratio=val_ratio, sample_weight_type=sample_weight_type,
batch_num=batch_num, block_seed=block_seed)
elif args.search:
assert args.model_id is not None, 'model_id is not passed'
model_id = args.model_id
to_log = False
block_repo_name = 'npy_batches.v2.ne.wp-q_f8-d_f150.search.ba_64.al_2.bl_2400'
        rel_calc_freq = 2000 # after this many doc-batch representations are computed, compute their relevances against the test queries in one pass
top_k = 10
with ExceptiontContainer(log_prefix='DSSM', hibernate=True, use_console=True, beep=True):
search_expt(model_id, block_repo_name, rel_calc_freq, top_k, to_log)
if args.hibernate and not env('WORKING', dynamic=True):
os_shutdown(hibernate=True)
|
import numpy as np
import random
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
from plotdot.plotLog.figures import square, line_trace, add_points, line_trace_from_p
from plotdot.svgDraw.main import group_elements, polylines
from plotdot.svgDraw.transform import translate, transform_allmeths
def plot_lines(lines, autoscale=True):
figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k',)
if type(autoscale) == tuple:
plt.xlim(0, autoscale[0])
plt.ylim(0, autoscale[1])
line_width = 10 / autoscale[0]
else:
line_width = 0.8
plt_opts = {'color': 'black',
'linewidth': line_width}
for (x, y) in lines:
plt.plot(x, y, **plt_opts)
plt.grid(True)
plt.show()
def transpose_lines(lines):
lines_c = lines.copy()
for i, l in enumerate(lines_c):
l_arr = np.array(l)
l_t = l_arr.transpose()
lines[i] = l_t
def squares_plot():
width = 100
height = 100
cx = width / 2
cy = height / 2
lines = []
for i in range(1,4):
margin = 0.25
size = i + 1
lines.append(square(cx, cy, size))
lines.append(square(cx, cy, size + margin))
transpose_lines(lines)
plot_lines(lines)
def linetraces():
lines = line_trace(x0=3, xw=10, xh=0, y0=1, yw=3, yh=18)
#lines = line_trace()
transpose_lines(lines)
autoscale = (20, 20)
autoscale = True
#plot_lines(lines, autoscale)
return lines
def inter_plot(): # no plot_run
lines = []
plt_opts = {'color': 'black',
'linewidth': 1}
for i in range(1, 50):
y_arr = [1 + i, 2 * i, 3]
x_arr = [3 + 1.5*i, 4, 6 * i/2]
lines.append((x_arr, y_arr))
plt.plot(x_arr, y_arr, **plt_opts) # k=black
plt.show()
def line_by_line(dwg, lines, scale_f):
(x_transl, y_transl) = 0, 0
skew_angle = 0
scale_t = [1, 1]
transl_t = [0, 0]
skew_x_t = [0, None]
skew_y_t = [0, None]
rotate_t = [0, None]
trans_dic = {1: scale_t,
2: transl_t,
3: skew_x_t,
4: skew_y_t,
5: rotate_t,
}
scale_steps_x = -1 #.1 # .1 #2
scale_steps_y = .1
shift = [0, 0]
def make_line_trace():
line_n = add_points(l)
lines_traced, shift = line_trace_from_p(line_n, density=1, nr_lines=5)
#plines = polylines(dwg, lines_traced)
plines = polylines(dwg, lines_traced, trans_dic)
g_pline = group_elements(dwg, plines, class_n='polyline')
return g_pline, shift
groups = []
rand_int = random.randint(1, 5)
rand_ints = [i * rand_int for i in [1, 3, 4, 5, 8, 11]]
#rand_ints = [5]
for k, l in enumerate(lines):
x_scale = k * scale_steps_x
y_scale = k * scale_steps_y
#scale_t[0] = 1 + x_scale
scale_t[1] = 1 + y_scale
#skew_x_t[0] = 1 + x_scale
rotate_t[0] = 1 + x_scale
if k in rand_ints:
g, shift = make_line_trace()
if transl_t[0] > 100:
print('big trans', transl_t[0])
else:
if (k % 2) == 0:
line_n = add_points(l)
lines_n, shift_not = line_trace_from_p(line_n, nr_lines=1)
else:
lines_n = [l]
plines = polylines(dwg, lines_n, trans_dic)
#plines = polylines(dwg, lines_n)
g = group_elements(dwg, plines, class_n='polyline')
#transform_allmeths(g, trans_dic)
#transl_t[0] = transl_t[0] + shift[0] #* scale_f
#transl_t[1] = transl_t[1] + shift[1] #* scale_f
shift = [0, 0]
#translate(g, sx=x_shift*scale_f, sy=y_shift*scale_f)
groups.append(g)
return groups |
"""
Asynchronous task functions wrapped by this package.
"""
from celery_tasks.main import celery_app
from celery_tasks.yuntongxun.ccp_sms import CCP
@celery_app.task(name='send_sms')
def send_sms(mobile, sms_code):
result = CCP().send_template_sms(mobile, [sms_code, 5], 1)
return result |
"""
amicus main: a friend to your python projects
Corey Rayburn Yung <[email protected]>
Copyright 2020-2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import annotations
import sys
from typing import (Any, Callable, ClassVar, Dict, Hashable, Iterable, List,
Mapping, MutableMapping, MutableSequence, Optional, Sequence, Set, Tuple,
Type, Union)
import amicus
def _args_to_dict() -> Dict[str, str]:
"""Converts command line arguments into 'arguments' dict.
The dictionary conversion is more forgiving than the typical argparse
construction. It allows the package to check default options and give
clearer error coding.
This handy bit of code, as an alternative to argparse, was found here:
https://stackoverflow.com/questions/54084892/
how-to-convert-commandline-key-value-args-to-dictionary
Returns:
Dict[str, str]: dictionary of command line options when the options are
separated by '='.
"""
arguments = {}
for argument in sys.argv[1:]:
if '=' in argument:
separated = argument.find('=')
key, value = argument[:separated], argument[separated + 1:]
arguments[key] = value
return arguments
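# Example (sketch; the option values are illustrative): invoking
#   python project.py -settings=settings.ini -data=input.csv
# yields {'-settings': 'settings.ini', '-data': 'input.csv'}.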
if __name__ == '__main__':
# Gets command line arguments and converts them to dict.
arguments = _args_to_dict()
# Calls Project with passed command-line arguments.
amicus.Project(
settings = arguments.get('-settings'),
clerk = arguments.get('-clerk', None),
data = arguments.get('-data', None)) |
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, redirect
from django.apps import apps
from django.urls import reverse
from django.views.decorators.csrf import csrf_protect
from order.models import Dish, Order
from menu.models import SubMenu
from order.forms import Order, HomeForm, PickUpForm
from validate_email import validate_email
from django.core.mail import EmailMessage
def order(request):
menu_model = apps.get_model('menu.Menu')
sub_menu_model = apps.get_model('menu.SubMenu')
all_menu = menu_model.objects.all()
all_sub_menu = sub_menu_model.objects.all()
return render(request, 'order/order.html',
{'all_menu': all_menu, 'all_sub_menu': all_sub_menu})
def order_details(request):
return render(request, 'order/order_details.html')
def add_order(request, pk1, pk2):
# sub_menu_model = apps.get_model('menu.SubMenu')
add_sub_menu = SubMenu.objects.get(pk=pk1)
menu_model = apps.get_model('menu.Menu')
sub_menu_model = apps.get_model('menu.SubMenu')
all_menu = menu_model.objects.all()
all_sub_menu = sub_menu_model.objects.all()
dish_name = add_sub_menu.dish_name
dish_price = add_sub_menu.price
order_list = Order.objects.get(pk=pk2)
dish = Dish.objects.create(name=dish_name, price=dish_price)
dish.order.add(Order.objects.get(pk=pk2))
dish.save()
get_dishes = Dish.objects.all()
final_dishes = ""
value = ""
sum = 0
for i in get_dishes:
print(order_list.id)
print(pk2)
try:
value = i.order.get(pk=order_list.id)
print(value)
print('reached')
if value is not None:
final_dishes = Dish.objects.filter(
order=i.order.get(pk=order_list.id)).all()
print(final_dishes)
print(value.user_email)
for i in final_dishes:
sum += i.price
break
else:
continue
except Exception as e:
continue
return render(request, 'order/order.html',
{'final_dishes': final_dishes, 'all_menu': all_menu,
'all_sub_menu': all_sub_menu, 'order_id': pk2,
'value': value, 'sum': sum})
@csrf_protect
def home_delivery(request):
if request.method == 'POST':
home_form = HomeForm(request.POST)
if home_form.is_valid():
homes = home_form.save(commit=False)
homes.save()
print(homes.id)
menu_model = apps.get_model('menu.Menu')
sub_menu_model = apps.get_model('menu.SubMenu')
all_menu = menu_model.objects.all()
all_sub_menu = sub_menu_model.objects.all()
return render(request, 'order/order.html', {'all_menu': all_menu,
'all_sub_menu': all_sub_menu,
'order_id': homes.id})
else:
print(home_form.errors)
else:
home_form = HomeForm()
return render(request, 'order/order_details.html',
{'home_form': home_form})
@csrf_protect
def pick_up(request):
if request.method == 'POST':
pick_form = PickUpForm(request.POST)
if pick_form.is_valid():
homes = pick_form.save(commit=False)
# name = home_form.cleaned_data['name']
# email = home_form.cleaned_data['email']
homes.save()
return render(request, 'order/pickup_confirmation.html', {})
else:
print(pick_form.errors)
else:
pick_form = PickUpForm()
return render(request, 'order/order_details.html',
{'pick_form': pick_form})
def mail_order(request, pk):
user = Order.objects.get(pk=pk)
name = user.user
email_send = user.user_email
number = user.phone_number
print (number)
email = EmailMessage('Regarding Home Delivery',
"Received mail from " + str(
email_send) + "\n\n" + "name: " +
str(name) + "\n" + "contact: " + str(number),
to=['[email protected]', ])
email.send()
email = EmailMessage('Regarding Home Delivery',
"Hey " + str(
name) + ",\n\n" + "We have "
"received "
"your request "
"for order "
"\n" +
"We will contact you shortly on " + str(number),
to=[email_send,
'[email protected]'],
reply_to=[email_send, ])
email.send()
print('reached')
return render(request, 'order/order_confirmation.html', {})
|
from __future__ import print_function, division
import numpy as np
import pandas as pd
from os.path import *
from os import getcwd
from os import listdir
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import check_directory_exists, get_datastore
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding
# Column name mapping
columnNameMapping = {'V': ('voltage', ''),
'I': ('current', ''),
'f': ('frequency', ''),
'DPF': ('pf', 'd'),
'APF': ('power factor', 'apparent'),
'P': ('power', 'active'),
'Pt': ('energy', 'active'),
'Q': ('power', 'reactive'),
'Qt': ('energy', 'reactive'),
'S': ('power', 'apparent'),
'St': ('energy', 'apparent')}
TIMESTAMP_COLUMN_NAME = "TIMESTAMP"
TIMEZONE = "America/Vancouver"
def _get_module_directory():
# Taken from http://stackoverflow.com/a/6098238/732596
path_to_this_file = dirname(getfile(currentframe()))
if not isdir(path_to_this_file):
encoding = getfilesystemencoding()
path_to_this_file = dirname(unicode(__file__, encoding))
if not isdir(path_to_this_file):
abspath(getsourcefile(lambda _: None))
if not isdir(path_to_this_file):
path_to_this_file = getcwd()
assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
return path_to_this_file
def convert_ampds(input_path, output_filename, format='HDF'):
"""
Parameters:
-----------
input_path: str
The path of the directory where all the csv
files are supposed to be stored
output_filename: str
The path of the h5 file where all the
standardized data is supposed to go. The path
should refer to a particular file and not just a
random directory in order for this to work.
format: str
Defaults to HDF5
Example usage:
--------------
convert('/AMPds/electricity', 'store.h5')
"""
check_directory_exists(input_path)
files = [f for f in listdir(input_path) if isfile(join(input_path, f)) and
'.csv' in f and '.swp' not in f]
# Sorting Lexicographically
files.sort()
# Remove Whole Home and put it at top
files.remove("WHE.csv")
files.insert(0, "WHE.csv")
assert isdir(input_path)
store = get_datastore(output_filename, format, mode='w')
for i, csv_file in enumerate(files):
key = Key(building=1, meter=(i + 1))
print('Loading file #', (i + 1), ' : ', csv_file, '. Please wait...')
df = pd.read_csv(join(input_path, csv_file))
# Due to fixed width, column names have spaces :(
df.columns = [x.replace(" ", "") for x in df.columns]
df.index = pd.to_datetime(df[TIMESTAMP_COLUMN_NAME], unit='s', utc=True)
df = df.drop(TIMESTAMP_COLUMN_NAME, 1)
df = df.tz_localize('GMT').tz_convert(TIMEZONE)
df.rename(columns=lambda x: columnNameMapping[x], inplace=True)
df.columns.set_names(LEVEL_NAMES, inplace=True)
df = df.convert_objects(convert_numeric=True)
df = df.dropna()
df = df.astype(np.float32)
store.put(str(key), df)
print("Done with file #", (i + 1))
store.close()
metadata_path = join(_get_module_directory(), 'metadata')
print('Processing metadata...')
convert_yaml_to_hdf5(metadata_path, output_filename) |
bl_info = {
'name': 'BlendNet - distributed cloud render',
'author': 'www.state-of-the-art.io',
'version': (0, 4, 0),
'warning': 'development version',
'blender': (2, 80, 0),
'location': 'Properties --> Render --> BlendNet Render',
'description': 'Allows to easy allocate resources in cloud and '
'run the cycles rendering with getting preview '
'and results',
'wiki_url': 'https://github.com/state-of-the-art/BlendNet/wiki',
'tracker_url': 'https://github.com/state-of-the-art/BlendNet/issues',
'category': 'Render',
}
if 'bpy' in locals():
import importlib
importlib.reload(BlendNet)
importlib.reload(blend_file)
else:
from . import (
BlendNet,
)
from .BlendNet import blend_file
import os
import time
import tempfile
from datetime import datetime
import bpy
from bpy.props import (
BoolProperty,
IntProperty,
StringProperty,
EnumProperty,
PointerProperty,
CollectionProperty,
)
class BlendNetAddonPreferences(bpy.types.AddonPreferences):
bl_idname = __package__
resource_provider: EnumProperty(
name = 'Provider',
description = 'Engine to provide resources for rendering',
items = BlendNet.addon.getProvidersEnumItems,
update = lambda self, context: BlendNet.addon.selectProvider(self.resource_provider),
)
blendnet_show_panel: BoolProperty(
name = 'Show BlendNet',
description = 'Show BlendNet render panel',
default = True,
)
# Advanced
blender_dist: EnumProperty(
name = 'Blender dist',
description = 'Blender distributive to use on manager/agents. '
'By default it\'s set to the current blender version and if '
'you want to change it - you will deal with the custom URL',
items = BlendNet.addon.fillAvailableBlenderDists,
update = lambda self, context: BlendNet.addon.updateBlenderDistProp(self.blender_dist),
)
blender_dist_url: StringProperty(
name = 'Blender dist URL',
description = 'URL to download the blender distributive',
default = '',
)
blender_dist_checksum: StringProperty(
name = 'Blender dist checksum',
description = 'Checksum of the distributive to validate the binary',
default = '',
)
blender_dist_custom: BoolProperty(
name = 'Custom dist URL',
description = 'Use custom url instead the automatic one',
default = False,
update = lambda self, context: BlendNet.addon.updateBlenderDistProp(),
)
session_id: StringProperty(
name = 'Session ID',
description = 'Identifier of the session and allocated resources. '
'It is used to properly find your resources in the GCP '
'project and separate your resources from the other ones. '
'Warning: Please be careful with this option and don\'t '
'change it if you don\'t know what it\'s doing',
maxlen = 12,
update = lambda self, context: BlendNet.addon.genSID(self, 'session_id'),
)
manager_instance_type: EnumProperty(
name = 'Manager size',
description = 'Selected manager instance size',
items = BlendNet.addon.fillAvailableInstanceTypesManager,
)
manager_ca_path: StringProperty(
name = 'CA certificate',
description = 'Certificate Authority certificate pem file location',
subtype = 'FILE_PATH',
default = '',
)
manager_address: StringProperty(
name = 'Address',
description = 'If you using the existing Manager service put address here '
'(it will be automatically created otherwise)',
default = '',
)
manager_port: IntProperty(
name = 'Port',
description = 'TLS tcp port to communicate Addon with Manager service',
min = 1,
max = 65535,
default = 8443,
)
manager_user: StringProperty(
name = 'User',
description = 'HTTP Basic Auth username (will be generated if empty)',
maxlen = 32,
default = 'blendnet-manager',
)
manager_password: StringProperty(
name = 'Password',
description = 'HTTP Basic Auth password (will be generated if empty)',
subtype = 'PASSWORD',
maxlen = 128,
default = '',
update = lambda self, context: BlendNet.addon.hidePassword(self, 'manager_password'),
)
manager_agent_instance_type: EnumProperty(
name = 'Agent size',
description = 'Selected agent instance size',
items = BlendNet.addon.fillAvailableInstanceTypesAgent,
)
manager_agents_max: IntProperty(
name = 'Agents max',
description = 'Maximum number of agents in Manager\'s pool',
min = 1,
max = 65535,
default = 3,
)
agent_use_cheap_instance: BoolProperty(
name = 'Use cheap VM',
description = 'Use cheap instances to save money',
default = True,
)
agent_cheap_multiplier: EnumProperty(
name = 'Cheap multiplier',
description = 'Way to choose the price to get a cheap VM. '
'Some providers allows to choose the maximum price for the instance '
'and it could be calculated from the ondemand (max) price multiplied by this value.',
items = BlendNet.addon.getCheapMultiplierList,
)
agent_port: IntProperty(
name = 'Port',
description = 'TLS tcp port to communicate Manager with Agent service',
min = 1,
max = 65535,
default = 9443,
)
agent_user: StringProperty(
name = 'User',
description = 'HTTP Basic Auth username (will be generated if empty)',
maxlen = 32,
default = 'blendnet-agent',
)
agent_password: StringProperty(
name = 'Password',
description = 'HTTP Basic Auth password (will be generated if empty)',
subtype = 'PASSWORD',
maxlen = 128,
default = '',
update = lambda self, context: BlendNet.addon.hidePassword(self, 'agent_password'),
)
# Hidden
show_advanced: BoolProperty(
name = 'Advanced Properties',
description = 'Show/Hide the advanced properties',
default = False,
)
manager_password_hidden: StringProperty(
subtype = 'PASSWORD',
update = lambda self, context: BlendNet.addon.genPassword(self, 'manager_password_hidden'),
)
agent_password_hidden: StringProperty(
subtype = 'PASSWORD',
update = lambda self, context: BlendNet.addon.genPassword(self, 'agent_password_hidden'),
)
def draw(self, context):
layout = self.layout
# Provider
box = layout.box()
row = box.row()
split = box.split(factor=0.8)
split.prop(self, 'resource_provider')
info = BlendNet.addon.getProviderDocs(self.resource_provider).split('\n')
for line in info:
if line.startswith('Help: '):
split.operator('wm.url_open', text='How to setup', icon='HELP').url = line.split(': ', 1)[-1]
provider_settings = BlendNet.addon.getProviderSettings()
for key, data in provider_settings.items():
path = 'provider_' + self.resource_provider + '_' + key
if not path in self.__class__.__annotations__:
print('ERROR: Unable to find provider setting:', path)
continue
if path not in self or self[path] is None:
self[path] = data.get('value')
box.prop(self, path)
messages = BlendNet.addon.getProviderMessages(self.resource_provider)
for msg in messages:
box.label(text=msg, icon='ERROR')
if not BlendNet.addon.checkProviderIsSelected():
err = BlendNet.addon.getProviderDocs(self.resource_provider).split('\n')
for line in err:
box.label(text=line.strip(), icon='ERROR')
return
if self.resource_provider != 'local':
box = box.box()
box.label(text='Collected cloud info:')
provider_info = BlendNet.addon.getProviderInfo(context)
if 'ERRORS' in provider_info:
for err in provider_info['ERRORS']:
box.label(text=err, icon='ERROR')
for key, value in provider_info.items():
if key == 'ERRORS':
continue
split = box.split(factor=0.5)
split.label(text=key, icon='DOT')
split.label(text=value)
# Advanced properties panel
advanced_icon = 'TRIA_RIGHT' if not self.show_advanced else 'TRIA_DOWN'
box = layout.box()
box.prop(self, 'show_advanced', emboss=False, icon=advanced_icon)
if self.show_advanced:
if self.resource_provider != 'local':
row = box.row()
row.prop(self, 'session_id')
row = box.row(align=True)
row.prop(self, 'blender_dist_custom', text='')
if not self.blender_dist_custom:
row.prop(self, 'blender_dist')
else:
row.prop(self, 'blender_dist_url')
box.row().prop(self, 'blender_dist_checksum')
box_box = box.box()
box_box.label(text='Manager')
if self.resource_provider != 'local':
row = box_box.row()
row.prop(self, 'manager_instance_type', text='Type')
row = box_box.row()
price = BlendNet.addon.getManagerPriceBG(self.manager_instance_type, context)
if price[0] < 0.0:
row.label(text='WARNING: Unable to find price for the type "%s": %s' % (
self.manager_instance_type, price[1]
), icon='ERROR')
else:
row.label(text='Calculated price: ~%s/Hour (%s)' % (round(price[0], 12), price[1]))
if self.resource_provider == 'local':
row = box_box.row()
row.use_property_split = True
row.prop(self, 'manager_address')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'manager_ca_path')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'manager_port')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'manager_user')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'manager_password')
box_box = box.box()
box_box.label(text='Agent')
if self.resource_provider != 'local':
row = box_box.row()
row.prop(self, 'agent_use_cheap_instance')
if 'Cheap instances not available' in provider_info.get('ERRORS', []):
row.enabled = False
else:
row.prop(self, 'agent_cheap_multiplier')
row = box_box.row()
row.enabled = not BlendNet.addon.isManagerCreated()
row.prop(self, 'manager_agent_instance_type', text='Agents type')
row.prop(self, 'manager_agents_max', text='Agents max')
row = box_box.row()
price = BlendNet.addon.getAgentPriceBG(self.manager_agent_instance_type, context)
if price[0] < 0.0:
row.label(text='ERROR: Unable to find price for the type "%s": %s' % (
self.manager_agent_instance_type, price[1]
), icon='ERROR')
else:
row.label(text='Calculated combined price: ~%s/Hour (%s)' % (
round(price[0] * self.manager_agents_max, 12), price[1]
))
min_price = BlendNet.addon.getMinimalCheapPriceBG(self.manager_agent_instance_type, context)
if min_price > 0.0:
row = box_box.row()
row.label(text='Minimal combined price: ~%s/Hour' % (
round(min_price * self.manager_agents_max, 12),
))
if price[0] <= min_price:
row = box_box.row()
row.label(text='ERROR: Selected cheap price is lower than minimal one', icon='ERROR')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'agent_port')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'agent_user')
row = box_box.row()
row.use_property_split = True
row.prop(self, 'agent_password')
class BlendNetSceneSettings(bpy.types.PropertyGroup):
scene_memory_req: IntProperty(
name = 'Scene RAM to render',
description = 'Required memory to render the scene in GB',
min = 0,
max = 65535,
default = 0,
)
@classmethod
def register(cls):
bpy.types.Scene.blendnet = PointerProperty(
name = 'BlendNet Settings',
description = 'BlendNet scene settings',
type = cls
)
@classmethod
def unregister(cls):
if hasattr(bpy.types.Scene, 'blendnet'):
del bpy.types.Scene.blendnet
class BlendNetManagerTask(bpy.types.PropertyGroup):
'''Class contains the manager task information'''
name: StringProperty()
create_time: StringProperty()
start_time: StringProperty()
end_time: StringProperty()
state: StringProperty()
done: StringProperty()
received: StringProperty()
class BlendNetSessionProperties(bpy.types.PropertyGroup):
manager_tasks: CollectionProperty(
name = 'Manager tasks',
description = 'Contains all the tasks that right now is available '
'on manager',
type = BlendNetManagerTask,
)
manager_tasks_idx: IntProperty(default=0)
status: StringProperty(
name = 'BlendNet status',
description = 'BlendNet is performing some operation',
default = 'idle',
)
@classmethod
def register(cls):
bpy.types.WindowManager.blendnet = PointerProperty(
name = 'BlendNet Session Properties',
description = 'Just current status of process for internal use',
type = cls,
)
@classmethod
def unregister(cls):
if hasattr(bpy.types.WindowManager, 'blendnet'):
del bpy.types.WindowManager.blendnet
class BlendNetToggleManager(bpy.types.Operator):
bl_idname = 'blendnet.togglemanager'
bl_label = ''
bl_description = 'Start/Stop manager instance'
_timer = None
_last_run = 0
@classmethod
def poll(cls, context):
return context.window_manager.blendnet.status == 'idle' or BlendNet.addon.isManagerStarted()
def invoke(self, context, event):
wm = context.window_manager
BlendNet.addon.toggleManager()
if BlendNet.addon.isManagerStarted():
self.report({'INFO'}, 'BlendNet stopping Manager instance...')
wm.blendnet.status = 'Manager stopping...'
else:
self.report({'INFO'}, 'BlendNet starting Manager instance...')
wm.blendnet.status = 'Manager starting...'
if context.area:
context.area.tag_redraw()
wm.modal_handler_add(self)
self._timer = wm.event_timer_add(5.0, window=context.window)
return {'RUNNING_MODAL'}
def modal(self, context, event):
if event.type != 'TIMER' or self._last_run + 4.5 > time.time():
return {'PASS_THROUGH'}
self._last_run = time.time()
return self.execute(context)
def execute(self, context):
wm = context.window_manager
if wm.blendnet.status == 'Manager starting...':
if not BlendNet.addon.isManagerStarted():
return {'PASS_THROUGH'}
self.report({'INFO'}, 'BlendNet Manager started')
wm.blendnet.status = 'Manager connecting...'
if context.area:
context.area.tag_redraw()
BlendNet.addon.requestManagerInfo(context)
elif wm.blendnet.status == 'Manager stopping...':
if not BlendNet.addon.isManagerStopped():
return {'PASS_THROUGH'}
if wm.blendnet.status == 'Manager connecting...':
if not BlendNet.addon.requestManagerInfo(context):
return {'PASS_THROUGH'}
self.report({'INFO'}, 'BlendNet Manager connected')
if self._timer is not None:
wm.event_timer_remove(self._timer)
wm.blendnet.status = 'idle'
if context.area:
context.area.tag_redraw()
return {'FINISHED'}
class BlendNetDestroyManager(bpy.types.Operator):
bl_idname = 'blendnet.destroymanager'
bl_label = ''
bl_description = 'Destroy manager instance'
@classmethod
def poll(cls, context):
return BlendNet.addon.isManagerStopped()
def invoke(self, context, event):
BlendNet.addon.destroyManager()
self.report({'INFO'}, 'BlendNet destroy Manager instance...')
return {'FINISHED'}
class BlendNetTaskPreviewOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskpreview'
bl_label = 'Open preview'
bl_description = 'Show the render for the currently selected task'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
return len(bn.manager_tasks) > bn.manager_tasks_idx
def _findRenderResultArea(self, context):
for window in context.window_manager.windows:
if window.scene != context.scene:
continue
for area in window.screen.areas:
if area.type != 'IMAGE_EDITOR':
continue
if area.spaces.active.image.type == 'RENDER_RESULT':
return area
return None
def invoke(self, context, event):
# Show the preview of the render if not open
if not self._findRenderResultArea(context):
bpy.ops.render.view_show('INVOKE_DEFAULT')
# Save the original render engine to run render on BlendNet
original_render_engine = context.scene.render.engine
context.scene.render.engine = __package__
# Start the render process
self.result = bpy.ops.render.render('INVOKE_DEFAULT')
# Restore the original scene engine
time.sleep(1.0)
if context.scene.render.engine == __package__:
context.scene.render.engine = original_render_engine
return {'FINISHED'}
class BlendNetRunTaskOperation(bpy.types.Operator):
bl_idname = 'blendnet.runtask'
bl_label = 'Run Task'
bl_description = 'Run Manager task using BlendNet resources'
is_animation: BoolProperty(
name = 'Animation',
description = 'Runs animation rendering instead of just a still image rendering',
default = False
)
_timer = None
_project_file: None # temp blend project file to ensure it will not be changed
_frame: 0 # current/start frame depends on animation
_frame_to: 0 # end frame for animation
_frame_orig: 0 # to restore the current frame after animation processing
_task_name: None # store task name to retry later
@classmethod
def poll(cls, context):
return BlendNet.addon.isManagerActive()
def _findRenderResultArea(self, context):
for window in context.window_manager.windows:
if window.scene != context.scene:
continue
for area in window.screen.areas:
if area.type != 'IMAGE_EDITOR':
continue
if area.spaces.active.image.type == 'RENDER_RESULT':
return area
def init(self, context):
'''Initializes the execution'''
if not bpy.data.filepath:
self.report({'ERROR'}, 'Unable to render not saved project. Please save it somewhere.')
return {'CANCELLED'}
# Fix and verify the blendfile dependencies
bads = blend_file.getDependencies(bpy.path.abspath('//'), os.path.abspath(''))[1]
if bads:
self.report({'ERROR'}, 'Found some bad dependencies - please fix them before run: %s' % (bads,))
return {'CANCELLED'}
# Saving project to the same directory
try:
self._project_file = bpy.data.filepath + '_blendnet.blend'
bpy.ops.wm.save_as_mainfile(
filepath = self._project_file,
check_existing = False,
compress = True,
copy = True,
)
except Exception as e:
self.report({'ERROR'}, 'Unable to save the "_blendnet.blend" project file: %s' % (e,))
return {'CANCELLED'}
if self.is_animation:
self._frame = context.scene.frame_start
self._frame_to = context.scene.frame_end
self._frame_orig = context.scene.frame_current
else:
self._frame = context.scene.frame_current
self._task_name = None
context.window_manager.modal_handler_add(self)
self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
return {'RUNNING_MODAL'}
def invoke(self, context, event):
return self.init(context)
def modal(self, context, event):
if event.type != 'TIMER':
return {'PASS_THROUGH'}
# Waiting for manager
if not BlendNet.addon.isManagerActive():
return {'PASS_THROUGH'}
return self.execute(context)
def execute(self, context):
scene = context.scene
wait = False
if not hasattr(self, '_frame'):
wait = True # The execute is running directly, so run in fg
if 'CANCELLED' in self.init(context):
self.report({'ERROR'}, 'Unable to init task preparation')
return {'CANCELLED'}
scene.frame_current = self._frame
fname = bpy.path.basename(bpy.data.filepath)
if not self._task_name:
# If the operation is not completed - reuse the same task name
d = datetime.utcnow().strftime('%y%m%d%H%M')
self._task_name = '%s%s-%d-%s' % (
BlendNet.addon.getTaskProjectPrefix(),
d, scene.frame_current,
BlendNet.addon.genRandomString(3)
)
print('DEBUG: Uploading task "%s" to the manager' % self._task_name)
# Prepare list of files need to be uploaded
deps, bads = blend_file.getDependencies(bpy.path.abspath('//'), os.path.abspath(''))
if bads:
self.report({'ERROR'}, 'Found some bad dependencies - please fix them before run: %s' % (bads,))
return {'CANCELLED'}
deps_map = dict([ (rel, bpy.path.abspath(rel)) for rel in deps ])
deps_map['//'+fname] = self._project_file
# Run the dependencies upload background process
BlendNet.addon.managerTaskUploadFiles(self._task_name, deps_map)
# Slow down the check process
if self._timer is not None:
context.window_manager.event_timer_remove(self._timer)
self._timer = context.window_manager.event_timer_add(3.0, window=context.window)
status = BlendNet.addon.managerTaskUploadFilesStatus()
if wait:
for retry in range(1, 10):
status = BlendNet.addon.managerTaskUploadFilesStatus()
if not status:
break
time.sleep(1.0)
if status:
self.report({'INFO'}, 'Uploading process for task %s: %s' % (self._task_name, status))
return {'PASS_THROUGH'}
# Configuring the task
print('INFO: Configuring task "%s"' % self._task_name)
self.report({'INFO'}, 'Configuring task "%s"' % (self._task_name,))
samples = None
if hasattr(scene.cycles, 'progressive'):
# For blender < 3.0.0
if scene.cycles.progressive == 'PATH':
samples = scene.cycles.samples
elif scene.cycles.progressive == 'BRANCHED_PATH':
samples = scene.cycles.aa_samples
else:
samples = scene.cycles.samples
if hasattr(scene.cycles, 'use_square_samples'):
# For blender < 3.0.0
# Addon need to pass the actual samples number to the manager
if scene.cycles.use_square_samples:
samples *= samples
# Where the compose result will be stored on the Addon side
compose_filepath = scene.render.frame_path()
if scene.render.filepath.startswith('//'):
# It's relative to blend project path
compose_filepath = bpy.path.relpath(compose_filepath)
cfg = {
'samples': samples,
'frame': scene.frame_current,
'project': fname,
'use_compositing_nodes': scene.render.use_compositing,
'compose_filepath': compose_filepath,
'project_path': bpy.path.abspath('//'), # To resolve the project parent paths like `//../..`
'cwd_path': os.path.abspath(''), # Current working directory to resolve relative paths like `../dir/file.txt`
}
if not BlendNet.addon.managerTaskConfig(self._task_name, cfg):
self.report({'WARNING'}, 'Unable to configure the task "%s", let\'s retry...' % (self._task_name,))
return {'PASS_THROUGH'}
# Running the task
self.report({'INFO'}, 'Running task "%s"' % self._task_name)
if not BlendNet.addon.managerTaskRun(self._task_name):
self.report({'WARNING'}, 'Unable to start the task "%s", let\'s retry...' % (self._task_name,))
return {'PASS_THROUGH'}
self.report({'INFO'}, 'Task "%s" marked as ready to start' % (self._task_name,))
# Ok, task is started - we can clean the name
self._task_name = None
if self.is_animation:
if self._frame < self._frame_to:
# Not all the frames are processed
self._frame += 1
return {'PASS_THROUGH'}
# Restore the original current frame
scene.frame_current = self._frame_orig
# Removing no more required temp blend file
os.remove(self._project_file)
if self._timer is not None:
context.window_manager.event_timer_remove(self._timer)
return {'FINISHED'}
class TASKS_UL_list(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname):
self.use_filter_sort_alpha = True
if self.layout_type in {'DEFAULT', 'COMPACT'}:
split = layout.split(factor=0.7)
split.label(text=item.name)
split.label(text=('%s:%s' % (item.state[0], item.done)) if item.done and item.state != 'COMPLETED' else item.state)
elif self.layout_type in {'GRID'}:
pass
class BlendNetGetNodeLogOperation(bpy.types.Operator):
bl_idname = 'blendnet.getnodelog'
bl_label = 'Get Node Log'
bl_description = 'Show the node (instance) log data'
node_id: StringProperty(
name = 'Node ID',
description = 'ID of the node/instance to get the log',
default = ''
)
@classmethod
def poll(cls, context):
return True
def invoke(self, context, event):
wm = context.window_manager
data = BlendNet.addon.getNodeLog(self.node_id)
if not data:
self.report({'WARNING'}, 'No log data retrieved for ' + self.node_id)
return {'CANCELLED'}
if data == 'NOT IMPLEMENTED':
self.report({'WARNING'}, 'Not implemented for the current provider')
return {'CANCELLED'}
prefix = self.node_id
def drawPopup(self, context):
layout = self.layout
if BlendNet.addon.showLogWindow(prefix, data):
layout.label(text='''Don't forget to unlink the file if you '''
'''don't want it to stay in the blend file.''')
else:
layout.label(text='Unable to show the log window', icon='ERROR')
wm.popup_menu(drawPopup, title='Log for ' + prefix, icon='INFO')
return {'FINISHED'}
class BlendNetGetAddonLogOperation(bpy.types.Operator):
bl_idname = 'blendnet.getaddonlog'
bl_label = 'Get BlendNet Addon Log'
bl_description = 'Show the running BlendNet addon log information'
@classmethod
def poll(cls, context):
return True
def invoke(self, context, event):
wm = context.window_manager
out = BlendNet.addon.getAddonLog()
prefix = 'addon'
if not out:
self.report({'ERROR'}, 'No log data found for ' + prefix)
return {'CANCELLED'}
data = []
line = ''
for t, l in out.items():
if not l.endswith('\n'):
line += l
continue
time_str = datetime.fromtimestamp(round(float(t), 3)).strftime('%y.%m.%d %H:%M:%S.%f')
data.append(time_str + '\t' + line + l)
line = ''
if line:
data.append('{not completed line}\t' + line)
data = ''.join(data)
def drawPopup(self, context):
layout = self.layout
if BlendNet.addon.showLogWindow(prefix, data):
layout.label(text='Don\'t forget to unlink the file if you don\'t want it to stay in the blend file.')
else:
layout.label(text='Unable to show the log window', icon='ERROR')
wm.popup_menu(drawPopup, title='Log for ' + prefix, icon='INFO')
return {'FINISHED'}
class BlendNetGetServiceLogOperation(bpy.types.Operator):
bl_idname = 'blendnet.getservicelog'
bl_label = 'Get Service Log'
bl_description = 'Show the service (daemon) log data'
agent_name: StringProperty(
name = 'Name of Agent',
description = 'Name of Agent (or Manager by default) to get the log from',
default = ''
)
@classmethod
def poll(cls, context):
return True
def invoke(self, context, event):
wm = context.window_manager
out = {}
if self.agent_name:
out = BlendNet.addon.agentGetLog(self.agent_name)
else:
out = BlendNet.addon.managerGetLog()
prefix = self.agent_name if self.agent_name else BlendNet.addon.getResources(context).get('manager', {}).get('name')
if not out:
self.report({'ERROR'}, 'No log data retrieved for ' + prefix)
return {'CANCELLED'}
data = []
line = ''
for t, l in out.items():
if not l.endswith('\n'):
line += l
continue
time_str = datetime.fromtimestamp(round(float(t), 3)).strftime('%y.%m.%d %H:%M:%S.%f')
data.append(time_str + '\t' + line + l)
line = ''
if line:
data.append('{not completed line}\t' + line)
data = ''.join(data)
def drawPopup(self, context):
layout = self.layout
if BlendNet.addon.showLogWindow(prefix, data):
layout.label(text='Don\'t forget to unlink the file if you don\'t want it to stay in the blend file.')
else:
layout.label(text='Unable to show the log window', icon='ERROR')
wm.popup_menu(drawPopup, title='Log for ' + prefix, icon='INFO')
return {'FINISHED'}
class BlendNetTaskInfoOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskinfo'
bl_label = 'Task info'
bl_description = 'Show the current task info panel'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
return len(bn.manager_tasks) > bn.manager_tasks_idx
def invoke(self, context, event):
wm = context.window_manager
def drawPopup(self, context):
layout = self.layout
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
data = BlendNet.addon.managerTaskStatus(task_name)
if not data:
return
keys = BlendNet.addon.naturalSort(data.keys())
for key in keys:
if key == 'result':
layout.label(text='%s:' % (key,))
for k in data[key]:
layout.label(text=' %s: %s' % (k, data[key][k]))
elif key == 'state_error_info':
layout.label(text='%s:' % (key,), icon='ERROR')
for it in data[key]:
if isinstance(it, dict):
for k, v in it.items():
layout.label(text=' %s: %s' % (k, v))
else:
layout.label(text=' ' + str(it))
else:
layout.label(text='%s: %s' % (key, data[key]))
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
wm.popup_menu(drawPopup, title='Task info for "%s"' % task_name, icon='INFO')
return {'FINISHED'}
class BlendNetTaskMessagesOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskmessages'
bl_label = 'Show task messages'
bl_description = 'Show the task execution messages'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
if len(bn.manager_tasks) <= bn.manager_tasks_idx:
return False
task_state = bn.manager_tasks[bn.manager_tasks_idx].state
return task_state not in {'CREATED', 'PENDING'}
def invoke(self, context, event):
wm = context.window_manager
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
out = BlendNet.addon.managerTaskMessages(task_name)
if not out:
self.report({'ERROR'}, 'No task messages found for "%s"' % (task_name,))
return {'CANCELLED'}
data = []
keys = BlendNet.addon.naturalSort(out.keys())
for key in keys:
data.append(key)
if not out[key]:
continue
for line in out[key]:
data.append(' ' + line)
data = '\n'.join(data)
prefix = task_name + 'messages'
def drawPopup(self, context):
layout = self.layout
if BlendNet.addon.showLogWindow(prefix, data):
layout.label(text='Don\'t forget to unlink the file if you don\'t want it to stay in the blend file.')
else:
layout.label(text='Unable to show the log window', icon='ERROR')
wm.popup_menu(drawPopup, title='Task messages for "%s"' % (task_name,), icon='TEXT')
return {'FINISHED'}
class BlendNetTaskDetailsOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskdetails'
bl_label = 'Show task details'
bl_description = 'Show the task execution details'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
if len(bn.manager_tasks) <= bn.manager_tasks_idx:
return False
task_state = bn.manager_tasks[bn.manager_tasks_idx].state
return task_state not in {'CREATED', 'PENDING'}
def invoke(self, context, event):
wm = context.window_manager
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
out = BlendNet.addon.managerTaskDetails(task_name)
if not out:
self.report({'ERROR'}, 'No task details found for "%s"' % (task_name,))
return {'CANCELLED'}
data = []
keys = BlendNet.addon.naturalSort(out.keys())
for key in keys:
data.append(key)
if not out[key]:
continue
for line in out[key]:
data.append(' ' + str(line))
data = '\n'.join(data)
prefix = task_name + 'details'
def drawPopup(self, context):
layout = self.layout
if BlendNet.addon.showLogWindow(prefix, data):
layout.label(text='Don\'t forget to unlink the file if you don\'t want it to stay in the blend file.')
else:
layout.label(text='Unable to show the log window', icon='ERROR')
wm.popup_menu(drawPopup, title='Task details for "%s"' % (task_name,), icon='TEXT')
return {'FINISHED'}
class BlendNetTaskRunOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskrun'
bl_label = 'Task run'
bl_description = 'Start the stopped or created task'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
if len(bn.manager_tasks) <= bn.manager_tasks_idx:
return False
task_state = bn.manager_tasks[bn.manager_tasks_idx].state
return task_state in {'CREATED', 'STOPPED'}
def invoke(self, context, event):
wm = context.window_manager
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
BlendNet.addon.managerTaskRun(task_name)
return {'FINISHED'}
class BlendNetTaskDownloadOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskdownload'
bl_label = 'Download task result'
bl_description = 'Download the completed task result'
result: StringProperty()
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
if len(bn.manager_tasks) <= bn.manager_tasks_idx:
return False
task_state = bn.manager_tasks[bn.manager_tasks_idx].state
# Allow downloading results even in the error state
return task_state in {'COMPLETED', 'ERROR'}
def invoke(self, context, event):
wm = context.window_manager
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
# If the result is downloaded manually - use the current project output directory
out_dir = os.path.dirname(bpy.context.scene.render.frame_path())
dir_path = os.path.join(out_dir, self.result)
result = BlendNet.addon.managerDownloadTaskResult(task_name, self.result, dir_path)
if result is None:
self.report({'WARNING'}, 'Unable to download the final result for %s, please retry later ' % (task_name,))
return {'CANCELLED'}
if not result:
self.report({'INFO'}, 'Downloading the final result for %s... ' % (task_name,))
return {'FINISHED'}
self.report({'INFO'}, 'The file is already downloaded and seems the same for %s... ' % (task_name,))
return {'CANCELLED'}
class BlendNetTaskStopOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskstop'
bl_label = 'Task stop'
bl_description = 'Stop the pending, running or error task'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
if len(bn.manager_tasks) <= bn.manager_tasks_idx:
return False
task_state = bn.manager_tasks[bn.manager_tasks_idx].state
return task_state in {'PENDING', 'RUNNING', 'ERROR'}
def invoke(self, context, event):
wm = context.window_manager
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
BlendNet.addon.managerTaskStop(task_name)
return {'FINISHED'}
class BlendNetTasksStopStartedOperation(bpy.types.Operator):
bl_idname = 'blendnet.tasksstopstarted'
bl_label = 'Stop all started tasks'
bl_description = 'Stop all the pending or running tasks'
bl_options = {'REGISTER', 'INTERNAL'}
tasks: CollectionProperty(type=BlendNetManagerTask)
@classmethod
def poll(cls, context):
return True
def invoke(self, context, event):
wm = context.window_manager
self.tasks.clear()
for task in wm.blendnet.manager_tasks:
if task.state in {'PENDING', 'RUNNING'}:
self.tasks.add().name = task.name
return wm.invoke_confirm(self, event)
def execute(self, context):
self.report({'INFO'}, 'Stopping %s tasks' % len(self.tasks))
for task in self.tasks:
print('INFO: Stopping task "%s"' % task.name)
BlendNet.addon.managerTaskStop(task.name)
self.tasks.clear()
return {'FINISHED'}
class BlendNetTaskRemoveOperation(bpy.types.Operator):
bl_idname = 'blendnet.taskremove'
bl_label = 'Remove selected task'
bl_description = 'Remove the task from the tasks list'
bl_options = {'REGISTER', 'INTERNAL'}
task_name: StringProperty()
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
if len(bn.manager_tasks) <= bn.manager_tasks_idx:
return False
return bn.manager_tasks[bn.manager_tasks_idx].state in {'CREATED', 'STOPPED', 'COMPLETED', 'ERROR'}
def invoke(self, context, event):
wm = context.window_manager
self.task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
return wm.invoke_confirm(self, event)
def execute(self, context):
self.report({'INFO'}, 'Removing task "%s"' % self.task_name)
BlendNet.addon.managerTaskRemove(self.task_name)
return {'FINISHED'}
class BlendNetAgentRemoveOperation(bpy.types.Operator):
bl_idname = 'blendnet.agentremove'
bl_label = 'Remove the agent'
bl_description = 'Remove the agent from the agents pool, or terminate its instance in case of a cloud provider'
bl_options = {'REGISTER', 'INTERNAL'}
agent_name: StringProperty()
@classmethod
def poll(cls, context):
return True
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_confirm(self, event)
def execute(self, context):
self.report({'INFO'}, 'Removing agent "%s"' % self.agent_name)
prefs = bpy.context.preferences.addons[__package__].preferences
if prefs.resource_provider == 'local':
if not BlendNet.addon.managerAgentRemove(self.agent_name):
self.report({'WARNING'}, 'Unable to remove agent "%s"' % (self.agent_name,))
return {'CANCELLED'}
self.report({'INFO'}, 'Removed agent "%s"' % (self.agent_name,))
else:
BlendNet.addon.destroyAgent(self.agent_name)
self.report({'INFO'}, 'Destroying BlendNet Agent instance ' + self.agent_name)
return {'FINISHED'}
class BlendNetAgentCreateOperation(bpy.types.Operator):
bl_idname = 'blendnet.agentcreate'
bl_label = 'Agent create'
bl_description = 'Register a new agent in the Manager'
agent_name: StringProperty(
name = 'Name',
description = 'Name of Agent to create',
default = ''
)
agent_address: StringProperty(
name = 'Address',
description = 'IP or domain name of the agent',
default = ''
)
agent_port: IntProperty(
name = 'Port',
description = 'TLS TCP port used by the Manager to communicate with the Agent service',
min = 1,
max = 65535,
default = 9443,
)
agent_user: StringProperty(
name = 'User',
description = 'HTTP Basic Auth username',
maxlen = 32,
default = '',
)
agent_password: StringProperty(
name = 'Password',
description = 'HTTP Basic Auth password',
subtype = 'PASSWORD',
maxlen = 128,
default = '',
)
@classmethod
def poll(cls, context):
return BlendNet.addon.isManagerActive()
def invoke(self, context, event):
wm = context.window_manager
prefs = bpy.context.preferences.addons[__package__].preferences
self.agent_port = prefs.agent_port
self.agent_user = prefs.agent_user
self.agent_password = prefs.agent_password_hidden
return wm.invoke_props_dialog(self)
def execute(self, context):
if not self.agent_name:
self.report({'ERROR'}, 'No agent name is specified')
return {'PASS_THROUGH'}
if not self.agent_address:
self.report({'ERROR'}, 'No agent address is specified')
return {'PASS_THROUGH'}
cfg = {
'address': self.agent_address,
'port': self.agent_port,
'auth_user': self.agent_user,
'auth_password': self.agent_password,
}
if not BlendNet.addon.managerAgentCreate(self.agent_name, cfg):
self.report({'WARNING'}, 'Unable to create agent "%s"' % (self.agent_name,))
return {'PASS_THROUGH'}
self.report({'INFO'}, 'Created agent "%s" (%s:%s)' % (
self.agent_name, self.agent_address, self.agent_port
))
return {'FINISHED'}
class BlendNetTasksRemoveEndedOperation(bpy.types.Operator):
bl_idname = 'blendnet.tasksremoveended'
bl_label = 'Remove all ended tasks'
bl_description = 'Remove all the stopped or completed tasks'
bl_options = {'REGISTER', 'INTERNAL'}
tasks: CollectionProperty(type=BlendNetManagerTask)
@classmethod
def poll(cls, context):
return True
def invoke(self, context, event):
wm = context.window_manager
self.tasks.clear()
for task in wm.blendnet.manager_tasks:
if task.state in {'STOPPED', 'COMPLETED'}:
self.tasks.add().name = task.name
return wm.invoke_confirm(self, event)
def execute(self, context):
self.report({'INFO'}, 'Removing %s tasks' % len(self.tasks))
for task in self.tasks:
print('INFO: Removing task "%s"' % task.name)
BlendNet.addon.managerTaskRemove(task.name)
self.tasks.clear()
return {'FINISHED'}
class BlendNetTaskMenu(bpy.types.Menu):
bl_idname = 'RENDER_MT_blendnet_task_menu'
bl_label = 'Task Menu'
bl_description = 'Operations on the tasks in the list'
@classmethod
def poll(cls, context):
bn = context.window_manager.blendnet
return len(bn.manager_tasks) > bn.manager_tasks_idx
def draw(self, context):
layout = self.layout
wm = context.window_manager
if not wm.blendnet.manager_tasks:
layout.label(text='No tasks in the list')
return
if len(wm.blendnet.manager_tasks) <= wm.blendnet.manager_tasks_idx:
# No such item in the list
return
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
layout.label(text='Task "%s":' % task_name)
layout.operator('blendnet.taskinfo', icon='INFO')
layout.operator('blendnet.taskmessages', icon='TEXT')
layout.operator('blendnet.taskdetails', icon='TEXT')
layout.operator('blendnet.taskdownload', text='Download render', icon='DOWNARROW_HLT').result = 'render'
layout.operator('blendnet.taskdownload', text='Download compose', icon='DOWNARROW_HLT').result = 'compose'
layout.operator('blendnet.taskrun', icon='PLAY')
layout.operator('blendnet.taskremove', icon='TRASH')
layout.operator('blendnet.taskstop', icon='PAUSE')
layout.label(text='All tasks actions:')
layout.operator('blendnet.tasksstopstarted', text='Stop all started tasks', icon='PAUSE')
layout.operator('blendnet.tasksremoveended', text='Remove all ended tasks', icon='TRASH')
class BlendNetRenderPanel(bpy.types.Panel):
bl_idname = 'RENDER_PT_blendnet_render'
bl_label = 'BlendNet'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'render'
bl_options = {'HIDE_HEADER'}
@classmethod
def poll(cls, context):
# Show the panel when the selected render engine is BlendNet or Cycles
return context.scene.render.engine in ('CYCLES', __package__)
def draw(self, context):
layout = self.layout
wm = context.window_manager
bn = context.scene.blendnet
prefs = context.preferences.addons[__package__].preferences
box = layout.box()
row = box.split(factor=0.5)
split = row.split(factor=0.1)
split.prop(prefs, 'blendnet_show_panel', icon_only=True)
split.label(text='BlendNet Render (%s)' % (prefs.resource_provider,))
split = row.split(factor=0.9)
split.label(text=context.window_manager.blendnet.status)
split.operator('blendnet.getaddonlog', text='', icon='TEXT')
if not prefs.blendnet_show_panel:
return
row = box.row()
row.use_property_split = True
row.use_property_decorate = False # No prop animation
row.prop(bn, 'scene_memory_req', text='Render RAM (GB)')
if not BlendNet.addon.checkProviderIsSelected():
box.label(text='ERROR: Provider init failed, check addon settings', icon='ERROR')
return
if not BlendNet.addon.checkAgentMemIsEnough():
box.label(text='WARN: Agent does not have enough memory to render the scene', icon='ERROR')
if not prefs.agent_use_cheap_instance:
box.label(text='WARN: No cheap VMs available, check addon settings', icon='ERROR')
if context.scene.render.engine != __package__:
row = box.row(align=True)
if BlendNet.addon.isManagerStarted():
row.operator('blendnet.runtask', text='Run Image Task', icon='RENDER_STILL').is_animation = False
row.operator('blendnet.runtask', text='Run Animation Tasks', icon='RENDER_ANIMATION').is_animation = True
elif prefs.resource_provider != 'local':
row.operator('blendnet.togglemanager', text='Run Manager instance', icon='ADD')
elif prefs.resource_provider == 'local':
split = row.split(factor=0.3)
split.label(text='Using Manager')
split.label(text='%s:%s' % (prefs.manager_address, prefs.manager_port))
if BlendNet.addon.isManagerActive():
box.template_list('TASKS_UL_list', '', wm.blendnet, 'manager_tasks', wm.blendnet, 'manager_tasks_idx', rows=1)
split = box.split(factor=0.8)
split.operator('blendnet.taskpreview', text='Task Preview', icon='RENDER_RESULT')
split.menu('RENDER_MT_blendnet_task_menu', text='Actions')
class BlendNetManagerPanel(bpy.types.Panel):
bl_idname = 'RENDER_PT_blendnet_manager'
bl_parent_id = 'RENDER_PT_blendnet_render'
bl_label = ' '
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'render'
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return context.preferences.addons[__package__].preferences.blendnet_show_panel and BlendNet.addon.checkProviderIsSelected()
def draw_header(self, context):
layout = self.layout
layout.label(text='Manager')
status = BlendNet.addon.getManagerStatus()
layout.label(text=status[0], icon=status[1])
prefs = bpy.context.preferences.addons[__package__].preferences
if prefs.resource_provider != 'local':
layout.operator('blendnet.togglemanager', icon='ADD' if not BlendNet.addon.isManagerStarted() else 'X')
layout.operator('blendnet.destroymanager', icon='TRASH')
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No prop animation
prefs = bpy.context.preferences.addons[__package__].preferences
if prefs.resource_provider != 'local':
row = layout.row()
row.enabled = not BlendNet.addon.isManagerCreated()
row.prop(prefs, 'manager_instance_type', text='Type')
price = BlendNet.addon.getManagerPriceBG(prefs.manager_instance_type, context)
row = layout.row()
if price[0] < 0.0:
row.label(text='WARNING: Unable to find price for the type "%s": %s' % (
prefs.manager_instance_type, price[1]
), icon='ERROR')
else:
row.label(text='Calculated price: ~%s/Hour (%s)' % (round(price[0], 8), price[1]))
if prefs.resource_provider == 'local':
split = layout.split(factor=0.3)
split.label(text='Address')
split.label(text='%s:%s' % (prefs.manager_address, prefs.manager_port))
row = layout.row()
manager_info = BlendNet.addon.getResources(context).get('manager')
col = row.column()
col.enabled = BlendNet.addon.isManagerActive()
col.operator('blendnet.getservicelog', text='Service Log', icon='TEXT').agent_name = ''
col = row.column()
col.enabled = BlendNet.addon.isManagerStarted()
op = col.operator('blendnet.getnodelog', text='Node Log', icon='TEXT')
op.node_id = manager_info.get('id', '') if manager_info else ''
if manager_info:
layout.label(text='Manager instance:')
box = layout.box()
for key, value in manager_info.items():
split = box.split(factor=0.3)
split.label(text=key)
split.label(text=str(value))
if BlendNet.addon.isManagerActive():
info = BlendNet.addon.requestManagerInfo(context)
if info:
layout.label(text='Manager info:')
box = layout.box()
blender_version = info.get('blender', {}).get('version_string')
if blender_version:
split = box.split(factor=0.3)
split.label(text='blender')
split.label(text=blender_version)
for key, value in info.get('platform', {}).items():
split = box.split(factor=0.3)
split.label(text=key)
split.label(text=str(value))
class BlendNetAgentsPanel(bpy.types.Panel):
bl_idname = 'RENDER_PT_blendnet_agents'
bl_parent_id = 'RENDER_PT_blendnet_render'
bl_label = ' '
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'render'
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return context.preferences.addons[__package__].preferences.blendnet_show_panel and BlendNet.addon.checkProviderIsSelected()
def draw_header(self, context):
layout = self.layout
layout.label(text='Agents (%d)' % BlendNet.addon.getStartedAgentsNumber(context))
prefs = bpy.context.preferences.addons[__package__].preferences
if prefs.resource_provider == 'local':
layout.operator('blendnet.agentcreate', icon='ADD', text='')
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No prop animation
prefs = bpy.context.preferences.addons[__package__].preferences
if prefs.resource_provider != 'local':
row = layout.row()
row.prop(prefs, 'manager_agent_instance_type', text='Agents type')
row.enabled = not BlendNet.addon.isManagerStarted()
row = layout.row()
row.prop(prefs, 'manager_agents_max', text='Agents max')
row.enabled = not BlendNet.addon.isManagerStarted()
row = layout.row()
price = BlendNet.addon.getAgentPriceBG(prefs.manager_agent_instance_type, context)
if price[0] < 0.0:
row.label(text='ERROR: Unable to find price for the type "%s": %s' % (
prefs.manager_agent_instance_type, price[1]
), icon='ERROR')
else:
row.label(text='Calculated combined price: ~%s/Hour (%s)' % (
round(price[0] * prefs.manager_agents_max, 8), price[1]
))
min_price = BlendNet.addon.getMinimalCheapPriceBG(prefs.manager_agent_instance_type, context)
if min_price > 0.0:
row = layout.row()
row.label(text='Minimal combined price: ~%s/Hour' % (
round(min_price * prefs.manager_agents_max, 8),
))
if price[0] <= min_price:
row = layout.row()
row.label(text='ERROR: Selected cheap price is lower than minimal one', icon='ERROR')
agents = BlendNet.addon.getResources(context).get('agents', {})
if agents:
box = layout.box()
for inst_name in sorted(agents.keys()):
info = agents[inst_name]
split = box.split(factor=0.8)
split.label(text=info.get('name'))
row = split.row()
row.enabled = BlendNet.addon.isManagerActive()
# The Agent status
if info.get('error'):
row.label(icon='ERROR') # You need to check logs
if info.get('active'):
row.label(icon='CHECKMARK') # Agent is active
elif info.get('started'):
row.label(icon='REC') # Node is started, but Agent is initializing
elif info.get('stopped'):
row.label(icon='PAUSE') # Node is stopped
else:
row.label(icon='X') # Node is terminated or unknown state
row.enabled = bool(info.get('started') or info.get('stopped')) or prefs.resource_provider == 'local'
if info.get('active'):
row.operator('blendnet.getservicelog', text='', icon='TEXT').agent_name = info.get('name', '')
else:
col = row.column()
col.operator('blendnet.getnodelog', text='', icon='TEXT').node_id = info.get('id', '')
col.enabled = bool(info.get('started'))
row.operator('blendnet.agentremove', icon='TRASH', text='').agent_name = info.get('name', '')
class BlendNetRenderEngine(bpy.types.RenderEngine):
'''Continuous render engine that allows switching between the tasks'''
bl_idname = __package__
bl_label = "BlendNet (don't use as a primary engine)"
bl_use_postprocess = True
bl_use_preview = False
def __init__(self):
self._prev_status = None
self._prev_message = None
print('DEBUG: Init BlendNet render')
def __del__(self):
print('DEBUG: Delete BlendNet render')
def updateStats(self, status = None, message = None):
'''Update the status only if something changed, and print it to the console'''
status = status or self._prev_status or ''
message = message or self._prev_message or ''
self.update_stats(status, message)
if self._prev_status != status or self._prev_message != message:
print('INFO: Render status: %s, %s' % (status, message))
self._prev_status = status
self._prev_message = message
def secToTime(self, sec):
h = sec // 3600
m = (sec % 3600) // 60
out = str((sec % 3600) % 60)+'s'
if h or m:
out = str(m)+'m'+out
if h:
out = str(h)+'h'+out
return out
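# Illustrative examples: secToTime(3725) returns '1h2m5s', secToTime(59) returns '59s'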
def render(self, depsgraph):
scene = depsgraph.scene
wm = bpy.context.window_manager
scale = scene.render.resolution_percentage / 100.0
self.size_x = int(scene.render.resolution_x * scale)
self.size_y = int(scene.render.resolution_y * scale)
rendering = True
prev_status = {}
prev_name = ''
loaded_final_render = False
temp_dir = tempfile.TemporaryDirectory(prefix='blendnet-preview_')
result = self.begin_result(0, 0, self.size_x, self.size_y)
while rendering:
time.sleep(1.0)
if self.test_break():
# TODO: render cancelled
self.updateStats(None, 'Cancelling...')
rendering = False
if len(wm.blendnet.manager_tasks) < wm.blendnet.manager_tasks_idx+1:
self.updateStats('Please select a task in the BlendNet manager tasks list')
continue
task_name = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].name
if task_name != prev_name:
self.update_result(result)
prev_name = task_name
loaded_final_render = False
status = BlendNet.addon.managerTaskStatus(task_name)
if not status:
continue
self.updateStats(None, '%s: %s' % (task_name, status.get('state')))
if status.get('state') == 'RUNNING':
remaining = None
if status.get('remaining'):
remaining = self.secToTime(status.get('remaining'))
self.updateStats('Rendered samples: %s/%s | Remaining: %s' % (
status.get('samples_done'), status.get('samples'),
remaining,
))
update_render = None
if status.get('state') == 'COMPLETED':
if not loaded_final_render:
total_time = self.secToTime((status.get('end_time') or 0) - (status.get('start_time_actual') or 0))
out_file = wm.blendnet.manager_tasks[wm.blendnet.manager_tasks_idx].received
if out_file == 'skipped':
# File was skipped by the downloader, so download it to temp dir
out_file = BlendNet.addon.managerDownloadTaskResult(task_name, 'compose', temp_dir.name)
if out_file and os.path.isfile(out_file):
self.updateStats('Got the final result: %s | Task render time: %s' % (out_file, total_time))
update_render = out_file
loaded_final_render = True
else:
# File is going to be downloaded by BlendNet.addon.updateManagerTasks() soon
self.updateStats('%s | Task render time: %s' % (out_file, total_time))
elif status.get('result', {}).get('preview') != prev_status.get('result', {}).get('preview'):
out_file = BlendNet.addon.managerDownloadTaskResult(task_name, 'preview', temp_dir.name)
if out_file and os.path.isfile(out_file):
update_render = out_file
else:
# It's downloading in the background, so don't store it right now
status['result']['preview'] = prev_status.get('result', {}).get('preview')
if update_render:
if os.path.isfile(update_render):
try:
result.layers[0].load_from_file(update_render)
print('DEBUG: Loaded preview layer:', update_render)
except Exception as e:
print('DEBUG: Unable to load the preview layer:', e)
result.load_from_file(update_render)
print('DEBUG: Loaded render result file:', update_render)
else:
print('ERROR: Unable to load non-existent result file "%s"' % (update_render,))
self.update_result(result)
prev_status = status
self.update_progress(status.get('samples_done')/status.get('samples', 1))
self.end_result(result)
def loadProvidersSettings():
'''Get the available providers settings to set and load them during registration of the class'''
all_settings = BlendNet.addon.getProvidersSettings()
for provider, provider_settings in all_settings.items():
for key, data in provider_settings.items():
path = 'provider_' + provider + '_' + key
print('DEBUG: registering provider config:', path)
if data.get('type') in ('string', 'path'):
BlendNetAddonPreferences.__annotations__[path] = StringProperty(
name = data.get('name'),
description = data.get('description'),
subtype = 'FILE_PATH' if data['type'] == 'path' else 'NONE',
update = BlendNet.addon.updateProviderSettings,
)
elif data.get('type') == 'choice':
BlendNetAddonPreferences.__annotations__[path] = EnumProperty(
name = data.get('name'),
description = data.get('description'),
items = data.get('values'),
update = BlendNet.addon.updateProviderSettings,
)
# Additional field to store string value (otherwise it's hard on init when
# value of enum is integer and has no items to choose from)
BlendNetAddonPreferences.__annotations__[path+'_value'] = StringProperty(
name = data.get('name'),
description = data.get('description'),
)
else:
print('ERROR: Unknown provider "%s" setting "%s" type: %s' % (provider, key, data.get('type')))
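# Illustrative note: a hypothetical provider 'gcp' with a setting 'project' would be
# registered as the preference 'provider_gcp_project' (plus 'provider_gcp_project_value'
# for 'choice'-type settings).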
def initPreferences():
'''Initialize the preferences with defaults'''
prefs = bpy.context.preferences.addons[__package__].preferences
# Set defaults for preferences
# Update resource_provider anyway to set the addon var
prefs.resource_provider = prefs.resource_provider or BlendNet.addon.getAddonDefaultProvider()
# Since the default for the property will be regenerated on every restart,
# we generate a new session id if the current one is empty (the assignment triggers the property setter)
if prefs.session_id == '':
prefs.session_id = ''
if prefs.manager_password_hidden == '':
prefs.manager_password_hidden = ''
if prefs.agent_password_hidden == '':
prefs.agent_password_hidden = ''
BlendNet.addon.fillAvailableBlenderDists()
# Getting provider info to make sure all the settings are ok
# for current provider configuration
BlendNet.addon.getProviderInfo()
def register():
BlendNet.addon.initAddonLog()
BlendNet.providers.loadProviders()
loadProvidersSettings()
bpy.utils.register_class(BlendNetAddonPreferences)
initPreferences()
bpy.utils.register_class(BlendNetSceneSettings)
bpy.utils.register_class(BlendNetManagerTask)
bpy.utils.register_class(TASKS_UL_list)
bpy.utils.register_class(BlendNetSessionProperties)
bpy.utils.register_class(BlendNetRenderEngine)
bpy.utils.register_class(BlendNetRunTaskOperation)
bpy.utils.register_class(BlendNetTaskPreviewOperation)
bpy.utils.register_class(BlendNetTaskInfoOperation)
bpy.utils.register_class(BlendNetTaskMessagesOperation)
bpy.utils.register_class(BlendNetTaskDetailsOperation)
bpy.utils.register_class(BlendNetTaskDownloadOperation)
bpy.utils.register_class(BlendNetTaskRunOperation)
bpy.utils.register_class(BlendNetTaskStopOperation)
bpy.utils.register_class(BlendNetTasksStopStartedOperation)
bpy.utils.register_class(BlendNetTaskRemoveOperation)
bpy.utils.register_class(BlendNetTasksRemoveEndedOperation)
bpy.utils.register_class(BlendNetAgentRemoveOperation)
bpy.utils.register_class(BlendNetAgentCreateOperation)
bpy.utils.register_class(BlendNetTaskMenu)
bpy.utils.register_class(BlendNetGetServiceLogOperation)
bpy.utils.register_class(BlendNetGetAddonLogOperation)
bpy.utils.register_class(BlendNetGetNodeLogOperation)
bpy.utils.register_class(BlendNetRenderPanel)
bpy.utils.register_class(BlendNetToggleManager)
bpy.utils.register_class(BlendNetDestroyManager)
bpy.utils.register_class(BlendNetManagerPanel)
bpy.utils.register_class(BlendNetAgentsPanel)
def unregister():
bpy.utils.unregister_class(BlendNetAgentsPanel)
bpy.utils.unregister_class(BlendNetManagerPanel)
bpy.utils.unregister_class(BlendNetToggleManager)
bpy.utils.unregister_class(BlendNetDestroyManager)
bpy.utils.unregister_class(BlendNetRenderPanel)
bpy.utils.unregister_class(BlendNetGetNodeLogOperation)
bpy.utils.unregister_class(BlendNetGetAddonLogOperation)
bpy.utils.unregister_class(BlendNetGetServiceLogOperation)
bpy.utils.unregister_class(BlendNetTaskMenu)
bpy.utils.unregister_class(BlendNetTaskInfoOperation)
bpy.utils.unregister_class(BlendNetAgentCreateOperation)
bpy.utils.unregister_class(BlendNetAgentRemoveOperation)
bpy.utils.unregister_class(BlendNetTasksRemoveEndedOperation)
bpy.utils.unregister_class(BlendNetTaskRemoveOperation)
bpy.utils.unregister_class(BlendNetTasksStopStartedOperation)
bpy.utils.unregister_class(BlendNetTaskStopOperation)
bpy.utils.unregister_class(BlendNetTaskRunOperation)
bpy.utils.unregister_class(BlendNetTaskDownloadOperation)
bpy.utils.unregister_class(BlendNetTaskDetailsOperation)
bpy.utils.unregister_class(BlendNetTaskMessagesOperation)
bpy.utils.unregister_class(BlendNetTaskPreviewOperation)
bpy.utils.unregister_class(BlendNetRunTaskOperation)
bpy.utils.unregister_class(BlendNetRenderEngine)
bpy.utils.unregister_class(BlendNetSessionProperties)
bpy.utils.unregister_class(TASKS_UL_list)
bpy.utils.unregister_class(BlendNetManagerTask)
bpy.utils.unregister_class(BlendNetSceneSettings)
bpy.utils.unregister_class(BlendNetAddonPreferences)
if __name__ == '__main__':
register()
|
"""Configuration for engines and models"""
import os
POOL_RECYCLE = int(os.environ.get("POOL_RECYCLE", 300))
DB_POOL_MIN_SIZE = int(os.environ.get("DB_POOL_MIN_SIZE", 2))
DB_POOL_MAX_SIZE = int(os.environ.get("DB_POOL_MAX_SIZE", 10))
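# Minimal usage sketch (assumption: these settings feed a SQLAlchemy engine; the
# helper below and the URL argument are illustrative, not part of the original module).
def make_engine(database_url):
    from sqlalchemy import create_engine  # imported lazily so the sketch adds no hard dependency
    # SQLAlchemy's QueuePool has no separate minimum size, so only the recycle
    # interval and the maximum size are mapped here.
    return create_engine(database_url, pool_recycle=POOL_RECYCLE, pool_size=DB_POOL_MAX_SIZE)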
|
from pandas import DataFrame as Pandas_DataFrame
class Pandas_MicrostatesDataFrame(Pandas_DataFrame):
def __init__(self):
from openktn.native.microstate import attributes as microstate_attributes
super().__init__(columns=microstate_attributes.keys())
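# Usage sketch (assumes openktn's microstate attributes are importable):
#   df = Pandas_MicrostatesDataFrame()  # empty frame with one column per microstate attribute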
|
"""
Most of the logic/heavy-lifting is based on:
github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/coco_evaluation.py#L225
"""
################################################################################
## Import packages. ##
################################################################################
from lvis import LVIS, LVISResults, LVISEval
from operator import itemgetter
from tabulate import tabulate
import numpy as np
import itertools
import logging
import pickle
import json
import math
import os
from config import fetch_config, print_args_stdout
from utils import fetch_aps, evaluate_map, print_aps
import ipdb
################################################################################
## Run stuff. ##
################################################################################
def main():
config = fetch_config()
print_args_stdout(config)
ipdb.set_trace()
print("Running eval.")
lvis_eval = LVISEval(config.ann_path, config.results_path, config.ann_type)
lvis_eval.run()
lvis_eval.print_results()
print("Finished eval.")
ipdb.set_trace()
# All precision values: 10 x 101 x 1230 x 4
# precision has dims (iou, recall, cls, area range)
precisions = lvis_eval.eval['precision']
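# Illustrative note (fetch_aps below does this for every class): the AP for a single
# class c and area range a is the mean of the valid precision entries, where -1 marks
# missing data in COCO/LVIS-style evaluation, e.g.
#   p = precisions[:, :, c, a]
#   ap = np.mean(p[p > -1]) if (p > -1).any() else float('nan')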
with open(config.ann_path, 'r') as outfile:
gt = json.load(outfile)
cat_metas = gt['categories']
cats = []
for cat_meta in cat_metas:
cats.append((cat_meta['id'], cat_meta['name']))
cats.sort(key=itemgetter(0))
class_names = [cat[1] for cat in cats]
area_type = 0
results_per_category, per_cat_results = fetch_aps(precisions, class_names, area_type)
print("mAP for area type {}: {}".format(area_type, evaluate_map(results_per_category)))
# Print for eye-balling.
# print_aps(results_per_category, class_names, n_cols=6)
# Store results_per_category into a JSON.
with open(config.aps_json_path, 'w') as json_file:
json.dump(per_cat_results, json_file, indent=4)
# Store the 4D precisions tensor as a PKL.
with open(config.prec_pkl_path, 'wb') as pkl_file:
pickle.dump(precisions, pkl_file)
################################################################################
## Execute main. ##
################################################################################
if __name__ == '__main__':
main()
|
'''
Provides basic and utility classes and functions.
'''
import os
os.environ["QT_MAC_WANTS_LAYER"] = "1"
import sys, threading
from PySide2 import QtGui, QtCore, QtWidgets
__author__ = "Yuehao Wang"
__pdoc__ = {}
class Object(object):
'''
Base class of other classes in `pylash`, providing fundamental interfaces for `pylash` objects.
'''
latestObjectIndex = 0
'''
The ID number of the last instantiated `pylash` object. It also represents the number of
instantiated `pylash` objects. Note: it is a class attribute shared by all `pylash` objects.
Type: `int`, read-only
'''
def __init__(self):
self.objectIndex = Object.latestObjectIndex
'''
The unique ID number of the object.
Type: `int`, read-only
'''
self.name = "instance" + str(self.objectIndex)
'''
The name of the object. Default: `"instance" + str(self.objectIndex)`.
Type: `str`
'''
Object.latestObjectIndex += 1
def _nonCopyableAttrs(self):
return ["objectIndex", "name"]
def copyFrom(self, source):
'''
Copies all instance attributes from `source` to `self`. The `source` should have the same
type as `self`, or be an instance of one of the parent classes of `self`'s class.
Parameters
----------
source : pylash.core.Object
The source object to be copied from.
'''
if not source or not isinstance(self, source.__class__):
raise TypeError("Object.copyFrom(source): cannot copy from the parameter 'source'.")
noncopyable = self._nonCopyableAttrs()
attrs = source.__dict__
for attr_name in attrs:
if attr_name in noncopyable:
continue
setattr(self, attr_name, attrs[attr_name])
__pdoc__["CanvasWidget.staticMetaObject"] = False
class CanvasWidget(QtWidgets.QWidget):
'''
A `QWidget` object which presents the main window, propagates window events, performs
rendering, etc. In most cases, it is not necessary to use this class.
'''
def __init__(self):
super(CanvasWidget, self).__init__()
self.setMouseTracking(True)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
def paintEvent(self, event):
'''
Override [QWidget.paintEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.paintEvent).
'''
stage._onShow()
def mousePressEvent(self, event):
'''
Override [QWidget.mousePressEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.mousePressEvent).
'''
self.__enterMouseEvent(event, "mouse_down")
def mouseMoveEvent(self, event):
'''
Override [QWidget.mouseMoveEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.mouseMoveEvent).
'''
stage._useHandCursor = False
self.__enterMouseEvent(event, "mouse_move")
if stage._useHandCursor:
self.setCursor(QtCore.Qt.PointingHandCursor)
else:
self.setCursor(QtCore.Qt.ArrowCursor)
def mouseReleaseEvent(self, event):
'''
Override [QWidget.mouseReleaseEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.mouseReleaseEvent).
'''
self.__enterMouseEvent(event, "mouse_up")
def mouseDoubleClickEvent(self, event):
'''
Override [QWidget.mouseDoubleClickEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.mouseDoubleClickEvent).
'''
self.__enterMouseEvent(event, "mouse_dbclick")
def keyPressEvent(self, event):
'''
Override [QWidget.keyPressEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.keyPressEvent).
'''
if not event.isAutoRepeat():
self.__enterKeyboardEvent(event, "key_down")
def keyReleaseEvent(self, event):
'''
Override [QWidget.keyReleaseEvent](https://doc.qt.io/qtforpython/PySide2/QtWidgets/QWidget.html#PySide2.QtWidgets.PySide2.QtWidgets.QWidget.keyReleaseEvent).
'''
if not event.isAutoRepeat():
self.__enterKeyboardEvent(event, "key_up")
def __enterKeyboardEvent(self, event, eventType):
from .events import Event
s = stage
if not s:
return
for o in s._keyboardEventList:
if o["eventType"] == eventType:
eve = Event(eventType)
eve.keyCode = event.key()
eve.keyText = event.text()
o["listener"](eve)
def __enterMouseEvent(self, event, eventType):
e = {"offsetX" : event.x(), "offsetY" : event.y(), "eventType" : eventType, "target" : None}
stage._enterMouseEvent(e, {"x" : 0, "y" : 0, "scaleX" : 1, "scaleY" : 1})
class Stage(Object):
'''
`pylash.core.Stage` provides interfaces to control and configure global settings and operations
regarding rendering and events. Intuitively, it is a scene where game objects like
characters, maps, effects, etc. can be presented.
This class is not expected to be instantiated directly. Instead, it is recommended to
use the global `stage` object, which is a builtin instance of this class and will be
configured by `pylash.core.init` and other initialization steps.
'''
PARENT = "stage_parent_root"
'''
An identifier representing the parent of the `stage` object.
Type: `str`, read-only
'''
def __init__(self):
super(Stage, self).__init__()
self.parent = Stage.PARENT
'''
The parent of `stage` object.
Type: `str`, read-only
'''
self.x = 0
self.y = 0
self.scaleX = 1
self.scaleY = 1
self.rotation = 0
self.width = 0
'''
The width of the game window.
Type: `int`, read-only
'''
self.height = 0
'''
The height of the game window.
Type: `int`, read-only
'''
self.speed = 0
'''
The repainting rate of the game window. Unit: millisecond.
Type: `float`, read-only
'''
self.app = None
'''
The `QApplication` instance that runs the game.
Type: `QApplication`, read-only
'''
self.canvasWidget = None
'''
The game window widget.
Type: `pylash.core.CanvasWidget`, read-only
'''
self.canvas = None
'''
The game window painter.
Type: `QPainter`, read-only
'''
self.timer = None
'''
The window repainting timer.
Type: `QTimer`, read-only
'''
self.childList = []
'''
The display list of the stage, i.e. the children rendered on the game window.
Type: `list` of `pylash.display.DisplayObject`, read-only
'''
self.backgroundColor = None
'''
The background color of the game window. Default: `None` (no background)
Type: `str`, `pylash.display.GradientColor`
'''
self.useAntialiasing = True
'''
Enable or disable antialiasing.
Type: `bool`
'''
self._useHandCursor = False
self._keyboardEventList = []
def copyFrom(self, source):
raise Exception("Stage objects cannot be copied from others.")
def _setCanvas(self, speed, title, width, height):
self.speed = speed
self.width = width
self.height = height
self.canvas = QtGui.QPainter()
self.canvasWidget = CanvasWidget()
self.canvasWidget.setWindowTitle(title)
self.canvasWidget.setFixedSize(width, height)
self.canvasWidget.show()
self.timer = QtCore.QTimer()
self.timer.setInterval(speed)
self.timer.start()
self.timer.timeout.connect(self.canvasWidget.update)
def _onShow(self):
self.canvas.begin(self.canvasWidget)
if self.useAntialiasing:
self.canvas.setRenderHint(QtGui.QPainter.Antialiasing, True)
else:
self.canvas.setRenderHint(QtGui.QPainter.Antialiasing, False)
if self.backgroundColor is not None:
self.canvas.fillRect(0, 0, self.width, self.height, getColor(self.backgroundColor))
else:
self.canvas.eraseRect(0, 0, self.width, self.height)
self._showDisplayList(self.childList)
self.canvas.end()
def _showDisplayList(self, childList):
for o in childList:
if hasattr(o, "_show") and hasattr(o._show, "__call__"):
o._show(self.canvas)
def _enterMouseEvent(self, event, cd):
childList = self.childList[:: -1]
currentCd = {"x" : cd["x"], "y" : cd["y"], "scaleX" : cd["scaleX"], "scaleY" : cd["scaleY"]}
for o in childList:
if hasattr(o, "_enterMouseEvent") and hasattr(o._enterMouseEvent, "__call__") and o._enterMouseEvent(event, currentCd, o._mouseIsOn):
break
def setFrameRate(self, speed):
'''
Sets the repainting rate of the game. Unit: millisecond.
Parameters
----------
speed : float
The repainting rate.
See Also
--------
pylash.core.Stage.speed
'''
if not self.timer:
return
self.speed = speed
self.timer.setInterval(speed)
def addChild(self, child):
'''
Appends `child` to the `stage`'s display list, then the `child` object will be rendered
on the game window.
Parameters
----------
child : pylash.display.DisplayObject
The display object to be added to the stage.
'''
if child is not None:
child.parent = self
self.childList.append(child)
else:
raise TypeError("Stage.addChild(child): parameter 'child' must be a display object.")
def removeChild(self, child):
'''
Removes `child` from the `stage`'s display list, then the `child` object will NOT be
rendered on the game window.
Parameters
----------
child : pylash.display.DisplayObject
The display object to be removed from the stage.
'''
if child is not None:
self.childList.remove(child)
child.die()
else:
raise TypeError("Stage.removeChild(child): parameter 'child' must be a display object.")
def addEventListener(self, e, listener):
'''
Adds an event listener to the `stage` object.
Parameters
----------
e : str or pylash.events.Event
The event type.
listener : function
The listener, i.e. a function invoked when the event is dispatched.
'''
from .events import Event
e = Event(e)
if hasattr(e, "eventType"):
if e.eventType == "key_down" or e.eventType == "key_up":
self._keyboardEventList.append({
"eventType" : e.eventType,
"listener" : listener
})
def removeEventListener(self, e, listener = None):
'''
Removes event listener(s) from the `stage` object. If the `listener` is omitted, all
event listeners with event type `e` will be removed.
Parameters
----------
e : str or pylash.events.Event
The event type.
listener : function, optional
The listener.
'''
from .events import Event
e = Event(e)
if hasattr(e, "eventType"):
if e.eventType == "key_down" or e.eventType == "key_up":
self._keyboardEventList = [
    o for o in self._keyboardEventList
    if not (o["eventType"] == e.eventType and (listener is None or o["listener"] == listener))
]
stage = Stage()
class KeyCode(object):
'''
An enumeration of available key codes. Each class attribute stands for a key code.
For more detailed meaning of each attribute, please refer to [the related documentation
of PySide2](https://doc.qt.io/qtforpython/PySide2/QtCore/Qt.html#PySide2.QtCore.PySide2.QtCore.Qt.Key).
Example
-------
```
def keyDown(e):
if e.keyCode == KeyCode.KEY_RIGHT:
print("press RIGHT")
elif e.keyCode == KeyCode.KEY_LEFT:
print("press LEFT")
stage.addEventListener(KeyboardEvent.KEY_DOWN, keyDown)
```
See Also
--------
pylash.core.Stage.addEventListener
pylash.core.Stage.removeEventListener
pylash.events.KeyboardEvent
'''
def __init__(self):
Exception("KeyCode cannot be instantiated.")
for o in dir(QtCore.Qt):
if o.find("Key_") == 0:
value = getattr(QtCore.Qt, o)
propertyName = o.upper()
setattr(KeyCode, propertyName, value)
class UnityOfDictAndClass(object):
'''
A static class providing interfaces of unified getting/setting operations on key-value pairs of
`dict` objects and attributes of other instances.
'''
def __init__(self):
Exception("UnityOfDictAndClass cannot be instantiated.")
@staticmethod
def set(obj, key, value):
'''
If `obj` is a `dict`, `obj[key] = value`. Otherwise, `setattr(obj, key, value)`.
Parameters
----------
obj : dict or object
The target to set a value.
key : str
A `dict` key or an attribute name.
value : any
The value that the attribute or key-value pair to store.
'''
if isinstance(obj, dict):
obj[key] = value
else:
setattr(obj, key, value)
@staticmethod
def get(obj, key):
'''
If `obj` is a `dict`, returns `obj[key]`. Otherwise, returns `getattr(obj, key)`. If the `key`
is not a key or is an undefined attribute in `obj`, returns `None`.
Parameters
----------
obj : dict or object
The target to retrieve values.
key : str
A `dict` key or an attribute name.
Returns
-------
any
The value that the attribute or key-value pair stores.
'''
value = None
if isinstance(obj, dict):
if key in obj:
value = obj[key]
else:
if hasattr(obj, key):
value = getattr(obj, key)
return value
@staticmethod
def has(obj, key):
'''
Returns `True` if the `key` is a key or a defined attribute in `obj`. Otherwise, returns `False`.
Parameters
----------
obj : dict or object
The target to check existence of a key-value pair or an attribute.
key : str
A `dict` key or an attribute name.
Returns
-------
bool
Existence of a key-value pair or an attribute.
'''
if isinstance(obj, dict):
return (key in obj)
else:
return hasattr(obj, key)
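# Minimal usage sketch (illustrative, not part of the original module):
#   config = {"width": 100}
#   UnityOfDictAndClass.set(config, "height", 200)  # dict: config["height"] = 200
#   UnityOfDictAndClass.get(stage, "width")         # object: getattr(stage, "width")
#   UnityOfDictAndClass.has(config, "depth")        # -> False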
def init(speed, title, width, height, callback):
'''
The initialization function of `pylash`. This function will create a game window, set its
size to \\(width \\times height\\) and window title to `title`. The game window will repaint
per `speed` milliseconds. After the setup of the game window, `callback` will be
invoked, which is the entrance function of your game.
Parameters
----------
speed : float
The window repainting rate. Generally, it is supposed to be \\(1000 / FPS\\).
title : str
The window title.
width : int
The window's width.
height : int
The window's height.
callback : function
The callback function invoked after the setup of game window.
Example
-------
```
def main():
print("Hello, world!")
init(1000 / 60, "Init Test", 100, 100, main)
```
'''
stage.app = QtWidgets.QApplication(sys.argv)
stage._setCanvas(speed, title, width, height)
if not hasattr(callback, "__call__"):
raise TypeError("init(speed, title, width, height, callback): parameter 'callback' must be a function.")
callback()
sys.exit(stage.app.exec_())
def addChild(child):
'''
Identical to `stage.addChild`.
Parameters
----------
child : pylash.display.DisplayObject
The display object to be appended to the root display list.
See Also
--------
Stage.addChild
'''
stage.addChild(child)
def removeChild(child):
'''
Identical to `stage.removeChild`.
Parameters
----------
child : pylash.display.DisplayObject
The display object to be removed from the root display list.
See Also
--------
Stage.removeChild
'''
stage.removeChild(child)
__pdoc__["getColor"] = False
def getColor(color):
if isinstance(color, QtGui.QColor) or isinstance(color, QtGui.QGradient):
return color
elif hasattr(color, "addColorStop"):
return color.value
elif not color:
return QtCore.Qt.transparent
else:
if isinstance(color, int):
color = hex(color)
if color[0 : 2].lower() == "0x":
color = "#" + color[2 ::]
colorObj = QtGui.QColor()
colorObj.setNamedColor(color)
return colorObj
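# Illustrative examples: getColor("#ff0000"), getColor("red") and getColor(0xFF0000)
# all resolve to a QColor, while getColor(None) resolves to Qt.transparent.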
__pdoc__["removeItemsInList"] = False
def removeItemsInList(theList, condition):
if not hasattr(condition, "__call__") or not isinstance(theList, list):
return
targetList = []
for o in theList:
if condition(o):
targetList.append(o)
for i in targetList:
theList.remove(i)
return targetList
|
n = int(input("Digite um número inteiro: "))
ant = n-1
suc = n+1
print("O número antecessor é: {}" .format(ant))
print("O numero sucessor é: {}" .format(suc))
|
"""
Preprocess the XSUM dataset
There are several noisy training instances which do not contain any words in the pre-defined vocabulary of the NTM.
We remove these instances.
Here are the details about these removed instances:
- instance #37993:
input: Here are our favourites:
target: On Monday, we asked for you to send us your favourite shop pun names.
- instance #47104:
input: Here are some of the Ethiopian runner's greatest feats.
target: Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.
- instance #71767:
input: JANUARYFEBRUARYMARCHAPRILMAYJUNE
target: As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.
- instance #94109:
input: Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim
target: FOOTBALL LEAGUE RESULTS
- instance #95592:
input: KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM
target: Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.
"""
import os
train_input, train_target = [], []
hardcoded_delete_input = ['Here are our favourites:\n', "Here are some of the Ethiopian runner's greatest feats.\n",
'JANUARYFEBRUARYMARCHAPRILMAYJUNE\n',
'Donegal 1-14 1-12 MayoDown 0-06 0-22 KerryDerry 2-12 1-18 GalwayLaois 0-14 1-14 TyroneMeath 1-13 1-20 CavanAntrim 2-14 0-09 Leitrim\n',
'KERRY 1-13 1-8 DONEGALMONAGHAN 1-12 2-11 MAYOROSCOMMON 1-12 0-6 DOWNFERMANAGH 1-17 0-10 LAOISLONDON 0-11 1-11 ANTRIMAllianz Hurling LeagueWESTMEATH 2-11 0-10 ANTRIM\n']
hardcoded_delete_target = ['On Monday, we asked for you to send us your favourite shop pun names.\n',
'Haile Gebrselassie has announced his retirement from competitive running, bringing to an end a 25-year career in which he claimed two Olympic gold medals, eight World Championship victories and set 27 world records.\n',
'As 2015 draws to an end, we take a look back at some of the major stories of the year, along with others that proved popular with readers.\n',
'FOOTBALL LEAGUE RESULTS\n',
'Tomas Corrigan shone as Fermanagh beat Laois while Antrim stayed top of Division Four with victory over London.\n']
with open(f"data/xsum/train.source", "r", encoding='utf8') as f:
for line in f:
if line not in hardcoded_delete_input:
train_input.append(line)
with open(f"data/xsum/train.target", "r", encoding='utf8') as f:
for line in f:
if line not in hardcoded_delete_target:
train_target.append(line)
print(f"there are {len(train_input)} in the new source file")
print(f"there are {len(train_target)} in the new target file")
if os.path.exists("data/xsum/train.source"):
os.remove("data/xsum/train.source")
if os.path.exists("data/xsum/train.target"):
os.remove("data/xsum/train.target")
with open(f"data/xsum/train.source", "w", encoding='utf8') as f:
for item in train_input:
f.write(item)
with open(f"data/xsum/train.target", "w", encoding='utf8') as f:
for item in train_target:
f.write(item)
|
#
# Copyright (c) Ionplus AG and contributors. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for details.
#
setCycleEnableNT = '''
create procedure _legacy_.setCycleEnableNT($state int, $run varchar(10), $cycle int)
main:
begin
declare $machine_run_number int;
declare $run_id int;
declare $cycle_id int;
set $machine_run_number = cast(regexp_replace($run, '[^0-9]', '') as signed);
select run.id into $run_id
from _brahma_.run
where run.machine_number = %(machine_number)s
and run.machine_run_number = $machine_run_number;
select cycle.id into $cycle_id
from _brahma_.cycle
inner join _brahma_.run on cycle.run_id = run.id
where cycle.number = $cycle and run.id = $run_id;
call _brahma_.set_cycle_enabled($cycle_id, $state);
end;
'''
setRunEnableNT = '''
create procedure _legacy_.setRunEnableNT($state int, $run varchar(10))
main:
begin
declare $machine_run_number int;
declare $run_id int;
set $machine_run_number = cast(regexp_replace($run, '[^0-9]', '') as signed);
select run.id into $run_id
from _brahma_.run
where run.machine_number = %(machine_number)s
and run.machine_run_number = $machine_run_number;
call _brahma_.set_run_enabled($run_id, $state);
end;
'''
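# Minimal usage sketch, not part of the original module: %(machine_number)s above is a
# Python string-formatting placeholder, so a caller presumably substitutes the machine
# number and then creates the procedures through a DB-API connection. The helper name
# and the connection object are assumptions made for illustration only.
def _install_legacy_procedures(connection, machine_number):
    for template in (setCycleEnableNT, setRunEnableNT):
        statement = template % {'machine_number': machine_number}
        cursor = connection.cursor()
        try:
            cursor.execute(statement)
        finally:
            cursor.close()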
|
import unittest
import os
from sklearn import datasets
from sklearn.utils.validation import check_random_state
from stacked_generalization.lib.stacking import StackedClassifier, FWLSClassifier
from stacked_generalization.lib.stacking import StackedRegressor, FWLSRegressor
from stacked_generalization.lib.joblibed import JoblibedClassifier, JoblibedRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import Ridge
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, log_loss, accuracy_score
from sklearn.utils.testing import assert_less
import numpy as np
from stacked_generalization.lib.util import numpy_c_concatenate
from stacked_generalization.lib.util import saving_predict_proba
from stacked_generalization.lib.util import get_model_id
from stacked_generalization.lib.util import multiple_feature_weight
from sklearn.model_selection import StratifiedKFold
from numpy.testing import assert_allclose
import glob
class TestStackedClassfier(unittest.TestCase):
def setUp(self):
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
self.iris = iris
def test_stacked_classfier_extkfold(self):
bclf = LogisticRegression(random_state=1)
clfs = [RandomForestClassifier(n_estimators=40, criterion = 'gini', random_state=1),
RidgeClassifier(random_state=1),
]
sl = StackedClassifier(bclf,
clfs,
n_folds=3,
verbose=0,
Kfold=list(StratifiedKFold(3).split(self.iris.data, self.iris.target)),
stack_by_proba=False,
oob_score_flag=True,
oob_metrics=log_loss)
sl.fit(self.iris.data, self.iris.target)
score = sl.score(self.iris.data, self.iris.target)
self.assertGreater(score, 0.9, "Failed with score = {0}".format(score))
def test_stacked_classfier(self):
bclf = LogisticRegression(random_state=1)
clfs = [RandomForestClassifier(n_estimators=40, criterion = 'gini', random_state=1),
ExtraTreesClassifier(n_estimators=30, criterion = 'gini', random_state=3),
GradientBoostingClassifier(n_estimators=25, random_state=1),
RidgeClassifier(random_state=1),
]
for n_folds, stack_by_proba in self.iter_for_stack_param():
sl = StackedClassifier(bclf,
clfs,
n_folds=n_folds,
verbose=0,
stack_by_proba=stack_by_proba,
oob_score_flag=True)
sl.fit(self.iris.data, self.iris.target)
score = sl.score(self.iris.data, self.iris.target)
self.assertGreater(score, 0.8, "Failed with score = {0}".format(score))
self.assertGreater(score, 0.8, "Failed with score = {0}".format(sl.oob_score_))
print('oob_score: {0} @n_folds={1}, stack_by_proba={2}'
.format(sl.oob_score_, sl.n_folds, sl.stack_by_proba))
for csv_file in glob.glob("*.csv"):
os.remove(csv_file)
for csv_file in glob.glob("*.pkl"):
os.remove(csv_file)
sl = StackedClassifier(bclf,
clfs,
oob_score_flag=True,
save_stage0=True)
sl.fit(self.iris.data, self.iris.target)
        score = sl.score(self.iris.data, self.iris.target)
self.assertGreater(score, 0.8, "Failed with score = {0}".format(score))
sl.fit(self.iris.data, self.iris.target)
        score = sl.score(self.iris.data, self.iris.target)
self.assertGreater(score, 0.8, "Failed with score = {0}".format(score))
self.assertTrue(glob.glob('ExtraTreesClassifier_*.csv'))
for csv_file in glob.glob("*.csv"):
os.remove(csv_file)
for csv_file in glob.glob("*.pkl"):
os.remove(csv_file)
def iter_for_stack_param(self):
yield 2, True
yield 4, True
yield 2, False
yield 3, False
def test_stacked_regressor(self):
bclf = LinearRegression()
clfs = [RandomForestRegressor(n_estimators=50, random_state=1),
GradientBoostingRegressor(n_estimators=25, random_state=1),
Ridge(random_state=1)]
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
sr = StackedRegressor(bclf,
clfs,
n_folds=3,
verbose=0,
oob_score_flag=True)
sr.fit(X_train, y_train)
mse = mean_squared_error(y_test, sr.predict(X_test))
assert_less(mse, 6.0)
def test_concatenate(self):
A = None
B = np.array([[1,2],[3,4]])
np.testing.assert_equal(numpy_c_concatenate(A, B), B)
A = np.array([[0], [1]])
np.testing.assert_equal(numpy_c_concatenate(A, B), [[0,1,2], [1,3,4]])
def test_save_prediction(self):
model = RandomForestClassifier()
model.id = get_model_id(model)
model.fit(self.iris.data, self.iris.target)
indexes = np.fromfunction(lambda x: x, (self.iris.data.shape[0], ), dtype=np.int32)
saving_predict_proba(model, self.iris.data, indexes)
any_file_removed = False
for filename in os.listdir('.'):
if filename.startswith('RandomForestClassifier'):
os.remove(filename)
any_file_removed = True
self.assertTrue(any_file_removed)
def test_fwls_classfier(self):
feature_func = lambda x: np.ones(x.shape)
bclf = LogisticRegression(random_state=1)
clfs = [RandomForestClassifier(n_estimators=40, criterion = 'gini', random_state=1),
RidgeClassifier(random_state=1),
]
sl = FWLSClassifier(bclf,
clfs,
feature_func=feature_func,
n_folds=3,
verbose=0,
Kfold=list(StratifiedKFold(3).split(self.iris.data, self.iris.target)),
stack_by_proba=False)
sl.fit(self.iris.data, self.iris.target)
score = sl.score(self.iris.data, self.iris.target)
self.assertGreater(score, 0.9, "Failed with score = {0}".format(score))
def test_fwls_regressor(self):
feature_func = lambda x: np.ones(x.shape)
bclf = LinearRegression()
clfs = [RandomForestRegressor(n_estimators=50, random_state=1),
GradientBoostingRegressor(n_estimators=25, random_state=1),
Ridge(random_state=1)]
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
sr = FWLSRegressor(bclf,
clfs,
feature_func,
n_folds=3,
verbose=0,
oob_score_flag=True)
sr.fit(X_train, y_train)
mse = mean_squared_error(y_test, sr.predict(X_test))
assert_less(mse, 6.0)
def test_multiple_feature_weight(self):
A = np.array([[1,2],[3,4],[5,6]])
B = np.array([[1],[1],[1]])
C = multiple_feature_weight(A, B)
np.testing.assert_equal(C, A)
B = np.array([[2],[2],[2]])
C = multiple_feature_weight(A, B)
np.testing.assert_equal(C, np.array([[2,4],[6,8],[10,12]]))
B = np.array([[1,2],[2,1],[1,2]])
C = multiple_feature_weight(A, B)
np.testing.assert_equal(C, np.array([[ 1, 2, 2, 4],
[ 6, 3, 8, 4],
[ 5, 10, 6, 12]]))
class TestJoblibedClassfier(unittest.TestCase):
def setUp(self):
iris = datasets.load_iris()
rng = check_random_state(0)
        perm = rng.permutation(iris.target.size)
        iris.data = iris.data[perm]
        iris.target = iris.target[perm]
self.iris = iris
for csv_file in glob.glob("*.csv"):
os.remove(csv_file)
def test_classifier(self):
index = [i for i in range(len(self.iris.data))]
rf = RandomForestClassifier()
jrf = JoblibedClassifier(rf, "rf", cache_dir='')
jrf.fit(self.iris.data, self.iris.target, index)
prediction = jrf.predict(self.iris.data, index)
score = accuracy_score(self.iris.target, prediction)
self.assertGreater(score, 0.9, "Failed with score = {0}".format(score))
rf = RandomForestClassifier(n_estimators=20)
jrf = JoblibedClassifier(rf, "rf", cache_dir='')
jrf.fit(self.iris.data, self.iris.target)
index = [i for i in range(len(self.iris.data))]
prediction2 = jrf.predict(self.iris.data, index)
self.assertTrue((prediction == prediction2).all())
def test_regressor(self):
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1,
noise=1.0)
X_train, y_train = X[:200], y[:200]
index = [i for i in range(200)]
rf = RandomForestRegressor()
jrf = JoblibedRegressor(rf, "rfr", cache_dir='')
jrf.fit(X_train, y_train, index)
prediction = jrf.predict(X_train, index)
mse = mean_squared_error(y_train, prediction)
assert_less(mse, 6.0)
rf = RandomForestRegressor(n_estimators=20)
jrf = JoblibedRegressor(rf, "rfr", cache_dir='')
jrf.fit(X_train, y_train, index)
prediction2 = jrf.predict(X_train, index)
assert_allclose(prediction, prediction2)
if __name__ == '__main__':
unittest.main()
|
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import sys
import hashlib
if sys.version_info < (3, 6):
import sha3
from OpenSSL import crypto
from OpenSSL import rand
import ecdsa
import shutil
from slugify import slugify
from collections import namedtuple
from enum import Enum
from google.protobuf import timestamp_pb2
from common import common_pb2 as common_dot_common_pb2
from common import configuration_pb2 as common_dot_configuration_pb2
from common import msp_principal_pb2
from msp import mspconfig_pb2
from peer import configuration_pb2 as peer_dot_configuration_pb2
# import orderer
from orderer import configuration_pb2 as orderer_dot_configuration_pb2
import orderer_util
import os
import re
import shutil
import compose
import uuid
# Type to represent a tuple of user, nodeName, organization
NodeAdminTuple = namedtuple("NodeAdminTuple", ['user', 'nodeName', 'organization'])
class ContextHelper:
def __init__(self, context):
self.context = context
self.guuid = GetUUID()
def getGuuid(self):
return self.guuid
def getTmpPath(self):
pathToReturn = "tmp"
if not os.path.isdir(pathToReturn):
os.makedirs(pathToReturn)
return pathToReturn
def getCachePath(self):
pathToReturn = os.path.join(self.getTmpPath(), "cache")
if not os.path.isdir(pathToReturn):
os.makedirs(pathToReturn)
return pathToReturn
def getTmpProjectPath(self):
pathToReturn = os.path.join(self.getTmpPath(), self.guuid)
if not os.path.isdir(pathToReturn):
os.makedirs(pathToReturn)
return pathToReturn
def getTmpPathForName(self, name, copyFromCache=False):
'Returns the tmp path for a file, and a flag indicating if the file exists. Will also check in the cache and copy to tmp if copyFromCache==True'
slugifiedName = slugify(name)
tmpPath = os.path.join(self.getTmpProjectPath(), slugifiedName)
fileExists = False
if os.path.isfile(tmpPath):
# file already exists in tmp path, return path and exists flag
fileExists = True
elif copyFromCache:
# See if the file exists in cache, and copy over to project folder.
cacheFilePath = os.path.join(self.getCachePath(), slugifiedName)
if os.path.isfile(cacheFilePath):
shutil.copy(cacheFilePath, tmpPath)
fileExists = True
return (tmpPath, fileExists)
def copyToCache(self, name):
srcPath, fileExists = self.getTmpPathForName(name, copyFromCache=False)
assert fileExists, "Tried to copy source file to cache, but file not found for: {0}".format(srcPath)
# Now copy to the cache if it does not already exist
cacheFilePath = os.path.join(self.getCachePath(), slugify(name))
if not os.path.isfile(cacheFilePath):
shutil.copy(srcPath, cacheFilePath)
def isConfigEnabled(self, configName):
return self.context.config.userdata.get(configName, "false") == "true"
@classmethod
def GetHelper(cls, context):
if not "contextHelper" in context:
context.contextHelper = ContextHelper(context)
return context.contextHelper
def GetUUID():
return compose.Composition.GetUUID()
def createRSAKey():
#Create RSA key, 2048 bit
pk = crypto.PKey()
pk.generate_key(crypto.TYPE_RSA,2048)
assert pk.check()==True
return pk
def createECDSAKey(curve=ecdsa.NIST256p):
#Create ECDSA key
sk = ecdsa.SigningKey.generate(curve=curve)
return sk
def computeCryptoHash(data):
    'Returns a 64-byte (512-bit) SHAKE-256 digest of the given data'
# s = hashlib.sha3_256()
s = hashlib.shake_256()
s.update(data)
return s.digest(64)
def createCertRequest(pkey, digest="sha256", **name):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
subj = req.get_subject()
for key, value in name.items():
setattr(subj, key, value)
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req
def createCertificate(req, issuerCertKey, serial, validityPeriod, digest="sha256"):
"""
Generate a certificate given a certificate request.
Arguments: req - Certificate request to use
issuerCert - The certificate of the issuer
issuerKey - The private key of the issuer
serial - Serial number for the certificate
notBefore - Timestamp (relative to now) when the certificate
starts being valid
notAfter - Timestamp (relative to now) when the certificate
stops being valid
digest - Digest method to use for signing, default is sha256
Returns: The signed certificate in an X509 object
"""
issuerCert, issuerKey = issuerCertKey
notBefore, notAfter = validityPeriod
cert = crypto.X509()
cert.set_serial_number(serial)
cert.gmtime_adj_notBefore(notBefore)
cert.gmtime_adj_notAfter(notAfter)
cert.set_issuer(issuerCert.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(issuerKey, digest)
return cert
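# Illustrative sketch only (not part of the original helpers): shows how the two functions
# above compose to produce a self-signed certificate, mirroring what Organization does
# below. The key type, subject fields and validity period are arbitrary example choices.
def _exampleSelfSignedCertificate():
    key = createRSAKey()
    req = createCertRequest(key, CN="example-node", O="ExampleOrg")
    oneYear = 60 * 60 * 24 * 365
    # Self-signed: the request's own subject and key act as the issuer.
    return createCertificate(req, (req, key), 1000, (0, oneYear))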
#SUBJECT_DEFAULT = {countryName : "US", stateOrProvinceName : "NC", localityName : "RTP", organizationName : "IBM", organizationalUnitName : "Blockchain"}
class Entity:
def __init__(self, name):
self.name = name
#Create a ECDSA key, then a crypto pKey from the DER for usage with cert requests, etc.
self.ecdsaSigningKey = createECDSAKey()
self.pKey = crypto.load_privatekey(crypto.FILETYPE_ASN1, self.ecdsaSigningKey.to_der())
# Signing related ecdsa config
self.hashfunc = hashlib.sha256
self.sigencode=ecdsa.util.sigencode_der_canonize
self.sigdecode=ecdsa.util.sigdecode_der
def createCertRequest(self, nodeName):
req = createCertRequest(self.pKey, CN=nodeName)
#print("request => {0}".format(crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)))
return req
def computeHash(self, data):
s = self.hashfunc()
s.update(data)
return s.digest()
def sign(self, dataAsBytearray):
return self.ecdsaSigningKey.sign(dataAsBytearray, hashfunc=self.hashfunc, sigencode=self.sigencode)
def verifySignature(self, signature, signersCert, data):
'Will verify the signature of an entity based upon public cert'
vk = ecdsa.VerifyingKey.from_der(crypto.dump_publickey(crypto.FILETYPE_ASN1, signersCert.get_pubkey()))
assert vk.verify(signature, data, hashfunc=self.hashfunc, sigdecode=self.sigdecode), "Invalid signature!!"
class User(Entity, orderer_util.UserRegistration):
def __init__(self, name):
Entity.__init__(self, name)
orderer_util.UserRegistration.__init__(self, name)
self.tags = {}
class Network(Enum):
Orderer = 1
Peer = 2
class Organization(Entity):
def __init__(self, name):
Entity.__init__(self, name)
req = createCertRequest(self.pKey, CN=name)
numYrs = 1
self.signedCert = createCertificate(req, (req, self.pKey), 1000, (0, 60*60*24*365*numYrs))
# Which networks this organization belongs to
self.networks = []
def getSelfSignedCert(self):
return self.signedCert
def getMSPConfig(self):
certPemsList = [crypto.dump_certificate(crypto.FILETYPE_PEM, self.getSelfSignedCert())]
# For now, admin certs and CA certs are the same per @ASO
adminCerts = certPemsList
cacerts = adminCerts
# Currently only 1 component, CN=<orgName>
# name = self.getSelfSignedCert().get_subject().getComponents()[0][1]
name = self.name
fabricMSPConfig = mspconfig_pb2.FabricMSPConfig(Admins=adminCerts, RootCerts=cacerts, Name=name)
mspConfig = mspconfig_pb2.MSPConfig(Config=fabricMSPConfig.SerializeToString(), Type=0)
return mspConfig
pass
def createCertificate(self, certReq):
numYrs = 1
return createCertificate(certReq, (self.signedCert, self.pKey), 1000, (0, 60*60*24*365*numYrs))
def addToNetwork(self, network):
'Used to track which network this organization is defined in.'
assert network in Network, 'Network not recognized ({0}), expected to be one of ({1})'.format(network, list(Network))
if not network in self.networks:
self.networks.append(network)
class Directory:
def __init__(self):
self.organizations = {}
self.users = {}
self.ordererAdminTuples = {}
def registerOrg(self, orgName, network):
assert orgName not in self.organizations, "Organization already registered {0}".format(orgName)
self.organizations[orgName] = Organization(orgName)
return self.organizations[orgName]
def registerUser(self, userName):
assert userName not in self.users, "User already registered {0}".format(userName)
self.users[userName] = User(userName)
return self.users[userName]
def getUser(self, userName, shouldCreate = False):
if not userName in self.users and shouldCreate:
self.users[userName] = User(userName)
return self.users[userName]
def getOrganization(self, orgName, shouldCreate = False):
if not orgName in self.organizations and shouldCreate:
self.organizations[orgName] = Organization(orgName)
return self.organizations[orgName]
def findCertByTuple(self, userName, contextName, orgName):
ordererAdminTuple = NodeAdminTuple(user = userName, nodeName = contextName, organization = orgName)
return self.ordererAdminTuples[ordererAdminTuple]
def findCertForNodeAdminTuple(self, nodeAdminTuple):
assert nodeAdminTuple in self.ordererAdminTuples, "Node admin tuple not found for: {0}".format(nodeAdminTuple)
return self.ordererAdminTuples[nodeAdminTuple]
def findNodeAdminTuple(self, userName, contextName, orgName):
nodeAdminTuple = NodeAdminTuple(user = userName, nodeName = contextName, organization = orgName)
assert nodeAdminTuple in self.ordererAdminTuples, "Node admin tuple not found for: {0}".format(nodeAdminTuple)
return nodeAdminTuple
def registerOrdererAdminTuple(self, userName, ordererName, organizationName):
' Assign the user as orderer admin'
ordererAdminTuple = NodeAdminTuple(user = userName, nodeName = ordererName, organization = organizationName)
assert ordererAdminTuple not in self.ordererAdminTuples, "Orderer admin tuple already registered {0}".format(ordererAdminTuple)
assert organizationName in self.organizations, "Orderer Organization not defined {0}".format(organizationName)
user = self.getUser(userName, shouldCreate = True)
certReq = user.createCertRequest(ordererAdminTuple.nodeName)
userCert = self.getOrganization(organizationName).createCertificate(certReq)
# Verify the newly created certificate
store = crypto.X509Store()
# Assuming a list of trusted certs
for trustedCert in [self.getOrganization(organizationName).signedCert]:
store.add_cert(trustedCert)
# Create a certificate context using the store and the certificate to verify
store_ctx = crypto.X509StoreContext(store, userCert)
# Verify the certificate, returns None if it can validate the certificate
store_ctx.verify_certificate()
self.ordererAdminTuples[ordererAdminTuple] = userCert
return ordererAdminTuple
class AuthDSLHelper:
@classmethod
def Envelope(cls, signaturePolicy, identities):
'Envelope builds an envelope message embedding a SignaturePolicy'
return common_dot_configuration_pb2.SignaturePolicyEnvelope(
Version=0,
Policy=signaturePolicy,
Identities=identities)
@classmethod
def NOutOf(cls, n, policies):
'NOutOf creates a policy which requires N out of the slice of policies to evaluate to true'
return common_dot_configuration_pb2.SignaturePolicy(
From=common_dot_configuration_pb2.SignaturePolicy.NOutOf(
N=n,
Policies=policies,
),
)
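# Illustrative sketch only: an "accept all" signature policy envelope built with the helper
# above, mirroring how encodeAcceptAllPolicy uses it further below in this module.
def _exampleAcceptAllPolicyEnvelope():
    # N=0 out of an empty policy list evaluates to true for any submitter.
    return AuthDSLHelper.Envelope(signaturePolicy=AuthDSLHelper.NOutOf(0, []), identities=[])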
class BootstrapHelper:
KEY_CONSENSUS_TYPE = "ConsensusType"
KEY_CHAIN_CREATION_POLICY_NAMES = "ChainCreationPolicyNames"
KEY_ACCEPT_ALL_POLICY = "AcceptAllPolicy"
KEY_INGRESS_POLICY = "IngressPolicyNames"
KEY_EGRESS_POLICY = "EgressPolicyNames"
KEY_HASHING_ALGORITHM = "HashingAlgorithm"
KEY_BATCH_SIZE = "BatchSize"
KEY_BATCH_TIMEOUT = "BatchTimeout"
KEY_CREATIONPOLICY = "CreationPolicy"
KEY_MSP_INFO = "MSP"
KEY_ANCHOR_PEERS = "AnchorPeers"
KEY_NEW_CONFIGURATION_ITEM_POLICY = "NewConfigurationItemPolicy"
DEFAULT_CHAIN_CREATORS = [KEY_ACCEPT_ALL_POLICY]
DEFAULT_NONCE_SIZE = 24
def __init__(self, chainId = "TestChain", lastModified = 0, msgVersion = 1, epoch = 0, consensusType = "solo", batchSize = 10, batchTimeout="10s", absoluteMaxBytes=100000000, preferredMaxBytes=512*1024, signers=[]):
self.chainId = str(chainId)
self.lastModified = lastModified
self.msgVersion = msgVersion
self.epoch = epoch
self.consensusType = consensusType
self.batchSize = batchSize
self.batchTimeout = batchTimeout
self.absoluteMaxBytes = absoluteMaxBytes
self.preferredMaxBytes = preferredMaxBytes
self.signers = signers
@classmethod
def getNonce(cls):
return rand.bytes(BootstrapHelper.DEFAULT_NONCE_SIZE)
@classmethod
    def addSignatureToSignedConfigItem(cls, signedConfigItem, entityCertTuple):
        # Unpack here rather than in the signature (tuple parameters are Python 2 only syntax).
        (entity, cert) = entityCertTuple
        sigHeader = common_dot_common_pb2.SignatureHeader(creator=crypto.dump_certificate(crypto.FILETYPE_ASN1, cert), nonce=BootstrapHelper.getNonce())
sigHeaderBytes = sigHeader.SerializeToString()
# Signature over the concatenation of configurationItem bytes and signatureHeader bytes
signature = entity.sign(signedConfigItem.ConfigurationItem + sigHeaderBytes)
# Now add new signature to Signatures repeated field
newConfigSig = signedConfigItem.Signatures.add()
newConfigSig.signatureHeader=sigHeaderBytes
newConfigSig.signature=signature
def makeChainHeader(self, type = common_dot_common_pb2.HeaderType.Value("CONFIGURATION_ITEM"), txID = "", extension='', version = 1,
timestamp = timestamp_pb2.Timestamp(seconds = int(time.time()), nanos = 0)):
return common_dot_common_pb2.ChainHeader(type = type,
version = version,
timestamp = timestamp,
chainID = self.chainId,
epoch = self.epoch,
txID = txID,
extension = extension)
def makeSignatureHeader(self, serializeCertChain, nonce):
return common_dot_common_pb2.SignatureHeader(creator = serializeCertChain,
nonce = nonce)
def signConfigItem(self, configItem):
signedConfigItem = common_dot_configuration_pb2.SignedConfigurationItem(ConfigurationItem=configItem.SerializeToString(), Signatures=None)
return signedConfigItem
def getConfigItem(self, commonConfigType, key, value):
configItem = common_dot_configuration_pb2.ConfigurationItem(
Header=self.makeChainHeader(type=common_dot_common_pb2.HeaderType.Value("CONFIGURATION_ITEM")),
Type=commonConfigType,
LastModified=self.lastModified,
ModificationPolicy=BootstrapHelper.KEY_NEW_CONFIGURATION_ITEM_POLICY,
Key=key,
Value=value)
return configItem
def encodeAnchorInfo(self, ciValue):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Peer"),
key=BootstrapHelper.KEY_ANCHOR_PEERS,
value=ciValue.SerializeToString())
return self.signConfigItem(configItem)
def encodeMspInfo(self, mspUniqueId, ciValue):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("MSP"),
key=mspUniqueId,
value=ciValue.SerializeToString())
return self.signConfigItem(configItem)
def encodeHashingAlgorithm(self, hashingAlgorithm="SHAKE256"):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Chain"),
key=BootstrapHelper.KEY_HASHING_ALGORITHM,
value=common_dot_configuration_pb2.HashingAlgorithm(name=hashingAlgorithm).SerializeToString())
return self.signConfigItem(configItem)
def encodeBatchSize(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_BATCH_SIZE,
value=orderer_dot_configuration_pb2.BatchSize(maxMessageCount=self.batchSize, absoluteMaxBytes=self.absoluteMaxBytes, preferredMaxBytes=self.preferredMaxBytes).SerializeToString())
return self.signConfigItem(configItem)
def encodeBatchTimeout(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_BATCH_TIMEOUT,
value=orderer_dot_configuration_pb2.BatchTimeout(timeout=self.batchTimeout).SerializeToString())
return self.signConfigItem(configItem)
def encodeConsensusType(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_CONSENSUS_TYPE,
value=orderer_dot_configuration_pb2.ConsensusType(type=self.consensusType).SerializeToString())
return self.signConfigItem(configItem)
def encodeChainCreators(self, ciValue = orderer_dot_configuration_pb2.ChainCreationPolicyNames(names=DEFAULT_CHAIN_CREATORS).SerializeToString()):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_CHAIN_CREATION_POLICY_NAMES,
value=ciValue)
return self.signConfigItem(configItem)
def encodePolicy(self, key, policy=common_dot_configuration_pb2.Policy(type=common_dot_configuration_pb2.Policy.PolicyType.Value("SIGNATURE"), policy=AuthDSLHelper.Envelope(signaturePolicy=AuthDSLHelper.NOutOf(0,[]), identities=[]).SerializeToString())):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Policy"),
key=key,
value=policy.SerializeToString())
return self.signConfigItem(configItem)
def encodeEgressPolicy(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_EGRESS_POLICY,
value=orderer_dot_configuration_pb2.EgressPolicyNames(names=[BootstrapHelper.KEY_ACCEPT_ALL_POLICY]).SerializeToString())
return self.signConfigItem(configItem)
def encodeIngressPolicy(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_INGRESS_POLICY,
value=orderer_dot_configuration_pb2.IngressPolicyNames(names=[BootstrapHelper.KEY_ACCEPT_ALL_POLICY]).SerializeToString())
return self.signConfigItem(configItem)
def encodeAcceptAllPolicy(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Policy"),
key=BootstrapHelper.KEY_ACCEPT_ALL_POLICY,
value=common_dot_configuration_pb2.Policy(type=1, policy=AuthDSLHelper.Envelope(signaturePolicy=AuthDSLHelper.NOutOf(0,[]), identities=[]).SerializeToString()).SerializeToString())
return self.signConfigItem(configItem)
def lockDefaultModificationPolicy(self):
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Policy"),
key=BootstrapHelper.KEY_NEW_CONFIGURATION_ITEM_POLICY,
value=common_dot_configuration_pb2.Policy(type=1, policy=AuthDSLHelper.Envelope(signaturePolicy=AuthDSLHelper.NOutOf(1,[]), identities=[]).SerializeToString()).SerializeToString())
return self.signConfigItem(configItem)
def computeBlockDataHash(self, blockData):
return computeCryptoHash(blockData.SerializeToString())
def signInitialChainConfig(self, signedConfigItems, chainCreationPolicyName):
'Create a signedConfigItem using previous config items'
# Create byte array to store concatenated bytes
# concatenatedConfigItemsBytes = bytearray()
# for sci in signedConfigItems:
# concatenatedConfigItemsBytes = concatenatedConfigItemsBytes + bytearray(sci.ConfigurationItem)
# hash = computeCryptoHash(concatenatedConfigItemsBytes)
data = ''
for sci in signedConfigItems:
data = data + sci.ConfigurationItem
# Compute hash over concatenated bytes
hash = computeCryptoHash(data)
configItem = self.getConfigItem(
commonConfigType=common_dot_configuration_pb2.ConfigurationItem.ConfigurationType.Value("Orderer"),
key=BootstrapHelper.KEY_CREATIONPOLICY,
value=orderer_dot_configuration_pb2.CreationPolicy(policy=chainCreationPolicyName, digest=hash).SerializeToString())
return [self.signConfigItem(configItem)] + signedConfigItems
def signInitialChainConfig(signedConfigItems, chainId, chainCreationPolicyName):
bootstrapHelper = BootstrapHelper(chainId = chainId)
# Returns a list prepended with a signedConfiguration
signedConfigItems = bootstrapHelper.signInitialChainConfig(signedConfigItems, chainCreationPolicyName)
return common_dot_configuration_pb2.ConfigurationEnvelope(Items=signedConfigItems)
def getDirectory(context):
if 'bootstrapDirectory' not in context:
context.bootstrapDirectory = Directory()
return context.bootstrapDirectory
def getOrdererBootstrapAdmin(context, shouldCreate=False):
directory = getDirectory(context)
ordererBootstrapAdmin = directory.getUser(userName="ordererBootstrapAdmin", shouldCreate=shouldCreate)
return ordererBootstrapAdmin
def addOrdererBootstrapAdminOrgReferences(context, policyName, orgNames):
'Adds a key/value pair of policyName/[orgName,...]'
directory = getDirectory(context)
ordererBootstrapAdmin = directory.getUser(userName="ordererBootstrapAdmin", shouldCreate=False)
if not 'OrgReferences' in ordererBootstrapAdmin.tags:
ordererBootstrapAdmin.tags['OrgReferences'] = {}
policyNameToOrgNamesDict = ordererBootstrapAdmin.tags['OrgReferences']
assert not policyName in policyNameToOrgNamesDict, "PolicyName '{0}' already registered with ordererBootstrapAdmin".format(policyName)
policyNameToOrgNamesDict[policyName] = orgNames
return policyNameToOrgNamesDict
def getOrdererBootstrapAdminOrgReferences(context):
directory = getDirectory(context)
ordererBootstrapAdmin = directory.getUser(userName="ordererBootstrapAdmin", shouldCreate=False)
if not 'OrgReferences' in ordererBootstrapAdmin.tags:
ordererBootstrapAdmin.tags['OrgReferences'] = {}
return ordererBootstrapAdmin.tags['OrgReferences']
def getSignedMSPConfigItems(context, chainId, orgNames):
directory = getDirectory(context)
bootstrapHelper = BootstrapHelper(chainId=chainId)
orgs = [directory.getOrganization(orgName) for orgName in orgNames]
mspSignedConfigItems = [bootstrapHelper.encodeMspInfo(org.name, org.getMSPConfig()) for org in orgs]
return mspSignedConfigItems
def getSignedAnchorConfigItems(context, chainId, nodeAdminTuples):
directory = getDirectory(context)
bootstrapHelper = BootstrapHelper(chainId=chainId)
anchorPeers = peer_dot_configuration_pb2.AnchorPeers()
for nodeAdminTuple in nodeAdminTuples:
anchorPeer = anchorPeers.anchorPeers.add()
anchorPeer.Host=nodeAdminTuple.nodeName
anchorPeer.Port=5611
anchorPeer.Cert=crypto.dump_certificate(crypto.FILETYPE_PEM, directory.findCertForNodeAdminTuple(nodeAdminTuple))
anchorsSignedConfigItems = [bootstrapHelper.encodeAnchorInfo(anchorPeers)]
return anchorsSignedConfigItems
def getMspConfigItemsForPolicyNames(context, chainId, policyNames):
directory = getDirectory(context)
ordererBootstrapAdmin = getOrdererBootstrapAdmin(context)
policyNameToOrgNamesDict = getOrdererBootstrapAdminOrgReferences(context)
# Get unique set of org names and return set of signed MSP ConfigItems
orgNamesReferenced = list(set([orgName for policyName in policyNames for orgName in policyNameToOrgNamesDict[policyName]]))
orgNamesReferenced.sort()
return getSignedMSPConfigItems(context=context, chainId=chainId, orgNames=orgNamesReferenced)
def createSignedConfigItems(context, chainId, consensusType, signedConfigItems = []):
# directory = getDirectory(context)
# assert len(directory.ordererAdminTuples) > 0, "No orderer admin tuples defined!!!"
bootstrapHelper = BootstrapHelper(chainId = chainId, consensusType=consensusType)
configItems = signedConfigItems
configItems.append(bootstrapHelper.encodeHashingAlgorithm())
# configItems.append(bootstrapHelper.encodeBlockDataHashingStructure())
# configItems.append(bootstrapHelper.encodeOrdererAddresses())
configItems.append(bootstrapHelper.encodeBatchSize())
configItems.append(bootstrapHelper.encodeBatchTimeout())
configItems.append(bootstrapHelper.encodeConsensusType())
configItems.append(bootstrapHelper.encodeAcceptAllPolicy())
configItems.append(bootstrapHelper.encodeIngressPolicy())
configItems.append(bootstrapHelper.encodeEgressPolicy())
configItems.append(bootstrapHelper.lockDefaultModificationPolicy())
return configItems
def createConfigTxEnvelope(chainId, signedConfigEnvelope):
bootstrapHelper = BootstrapHelper(chainId = chainId)
payloadChainHeader = bootstrapHelper.makeChainHeader(type=common_dot_common_pb2.HeaderType.Value("CONFIGURATION_TRANSACTION"))
#Now the SignatureHeader
serializedCreatorCertChain = None
nonce = None
payloadSignatureHeader = common_dot_common_pb2.SignatureHeader(
creator=serializedCreatorCertChain,
nonce=bootstrapHelper.getNonce(),
)
payloadHeader = common_dot_common_pb2.Header(
chainHeader=payloadChainHeader,
signatureHeader=payloadSignatureHeader,
)
payload = common_dot_common_pb2.Payload(header=payloadHeader, data=signedConfigEnvelope.SerializeToString())
envelope = common_dot_common_pb2.Envelope(payload=payload.SerializeToString(), signature=None)
return envelope
def createGenesisBlock(context, chainId, consensusType, signedConfigItems = []):
    'Generates the genesis block for starting the orderers and for use in the chain config transaction by peers'
#assert not "bootstrapGenesisBlock" in context,"Genesis block already created:\n{0}".format(context.bootstrapGenesisBlock)
directory = getDirectory(context)
assert len(directory.ordererAdminTuples) > 0, "No orderer admin tuples defined!!!"
configItems = createSignedConfigItems(context, chainId, consensusType, signedConfigItems = signedConfigItems)
bootstrapHelper = BootstrapHelper(chainId = chainId, consensusType=consensusType)
configEnvelope = common_dot_configuration_pb2.ConfigurationEnvelope(Items=configItems)
envelope = createConfigTxEnvelope(chainId, configEnvelope)
blockData = common_dot_common_pb2.BlockData(Data=[envelope.SerializeToString()])
# Spoke with kostas, for orderer in general
signaturesMetadata = ""
lastConfigurationBlockMetadata = common_dot_common_pb2.Metadata(value=common_dot_common_pb2.LastConfiguration(index=0).SerializeToString()).SerializeToString()
ordererConfigMetadata = ""
transactionFilterMetadata = ""
block = common_dot_common_pb2.Block(
Header=common_dot_common_pb2.BlockHeader(
Number=0,
PreviousHash=None,
DataHash=bootstrapHelper.computeBlockDataHash(blockData),
),
Data=blockData,
Metadata=common_dot_common_pb2.BlockMetadata(Metadata=[signaturesMetadata,lastConfigurationBlockMetadata,transactionFilterMetadata, ordererConfigMetadata]),
)
# Add this back once crypto certs are required
for nodeAdminTuple in directory.ordererAdminTuples:
userCert = directory.ordererAdminTuples[nodeAdminTuple]
certAsPEM = crypto.dump_certificate(crypto.FILETYPE_PEM, userCert)
# print("UserCert for orderer genesis:\n{0}\n".format(certAsPEM))
# print("")
return (block, envelope)
class PathType(Enum):
    'Denotes whether a path is relative to the local filesystem or to a container volume reference.'
Local = 1
Container = 2
class OrdererGensisBlockCompositionCallback(compose.CompositionCallback):
    'Responsible for setting the genesis block for the orderer nodes upon composition'
def __init__(self, context, genesisBlock, genesisFileName = "genesis_file"):
self.context = context
self.genesisFileName = genesisFileName
self.genesisBlock = genesisBlock
self.volumeRootPathInContainer="/var/hyperledger/bddtests"
compose.Composition.RegisterCallbackInContext(context, self)
def getVolumePath(self, composition, pathType=PathType.Local):
assert pathType in PathType, "Expected pathType of {0}".format(PathType)
basePath = "."
if pathType == PathType.Container:
basePath = self.volumeRootPathInContainer
return "{0}/volumes/orderer/{1}".format(basePath, composition.projectName)
def getGenesisFilePath(self, composition, pathType=PathType.Local):
return "{0}/{1}".format(self.getVolumePath(composition, pathType), self.genesisFileName)
def composing(self, composition, context):
print("Will copy gensisiBlock over at this point ")
os.makedirs(self.getVolumePath(composition))
with open(self.getGenesisFilePath(composition), "wb") as f:
f.write(self.genesisBlock.SerializeToString())
def decomposing(self, composition, context):
'Will remove the orderer volume path folder for the context'
shutil.rmtree(self.getVolumePath(composition))
def getEnv(self, composition, context, env):
env["ORDERER_GENERAL_GENESISMETHOD"]="file"
env["ORDERER_GENERAL_GENESISFILE"]=self.getGenesisFilePath(composition, pathType=PathType.Container)
class PeerCompositionCallback(compose.CompositionCallback):
'Responsible for setting up Peer nodes upon composition'
def __init__(self, context):
self.context = context
self.volumeRootPathInContainer="/var/hyperledger/bddtests"
compose.Composition.RegisterCallbackInContext(context, self)
def getVolumePath(self, composition, pathType=PathType.Local):
assert pathType in PathType, "Expected pathType of {0}".format(PathType)
basePath = "."
if pathType == PathType.Container:
basePath = self.volumeRootPathInContainer
return "{0}/volumes/peer/{1}".format(basePath, composition.projectName)
def getPeerList(self, composition):
return [serviceName for serviceName in composition.getServiceNames() if "peer" in serviceName]
def getLocalMspConfigPath(self, composition, peerService, pathType=PathType.Local):
return "{0}/{1}/localMspConfig".format(self.getVolumePath(composition, pathType), peerService)
def _createLocalMspConfigDirs(self, mspConfigPath):
os.makedirs("{0}/{1}".format(mspConfigPath, "signcerts"))
os.makedirs("{0}/{1}".format(mspConfigPath, "admincerts"))
os.makedirs("{0}/{1}".format(mspConfigPath, "cacerts"))
os.makedirs("{0}/{1}".format(mspConfigPath, "keystore"))
def composing(self, composition, context):
'Will copy local MSP info over at this point for each peer node'
directory = getDirectory(context)
for peerService in self.getPeerList(composition):
localMspConfigPath = self.getLocalMspConfigPath(composition, peerService)
self._createLocalMspConfigDirs(localMspConfigPath)
# Loop through directory and place Peer Organization Certs into cacerts folder
for peerOrg in [org for orgName,org in directory.organizations.items() if Network.Peer in org.networks]:
with open("{0}/cacerts/{1}.pem".format(localMspConfigPath, peerOrg.name), "w") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, peerOrg.getSelfSignedCert()))
# Loop through directory and place Peer Organization Certs into admincerts folder
#TODO: revisit this, ASO recommended for now
for peerOrg in [org for orgName,org in directory.organizations.items() if Network.Peer in org.networks]:
with open("{0}/admincerts/{1}.pem".format(localMspConfigPath, peerOrg.name), "w") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, peerOrg.getSelfSignedCert()))
# Find the peer signer Tuple for this peer and add to signcerts folder
for pnt, cert in [(peerNodeTuple,cert) for peerNodeTuple,cert in directory.ordererAdminTuples.items() if peerService in peerNodeTuple.user and "signer" in peerNodeTuple.user.lower()]:
# Put the PEM file in the signcerts folder
with open("{0}/signcerts/{1}.pem".format(localMspConfigPath, pnt.user), "w") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
# Put the associated private key into the keystore folder
user = directory.getUser(pnt.user, shouldCreate=False)
with open("{0}/keystore/{1}.pem".format(localMspConfigPath, pnt.user), "w") as f:
f.write(user.ecdsaSigningKey.to_pem())
# f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, user.pKey))
def decomposing(self, composition, context):
'Will remove the orderer volume path folder for the context'
shutil.rmtree(self.getVolumePath(composition))
def getEnv(self, composition, context, env):
for peerService in self.getPeerList(composition):
localMspConfigPath = self.getLocalMspConfigPath(composition, peerService, pathType=PathType.Container)
env["{0}_CORE_PEER_MSPCFGPATH".format(peerService.upper())]=localMspConfigPath
def createChainCreationPolicyNames(context, chainCreationPolicyNames, chaindId):
directory = getDirectory(context)
bootstrapHelper = BootstrapHelper(chainId = chaindId)
chainCreationPolicyNamesSignedConfigItem = bootstrapHelper.encodeChainCreators(ciValue = orderer_dot_configuration_pb2.ChainCreationPolicyNames(names=chainCreationPolicyNames).SerializeToString())
return chainCreationPolicyNamesSignedConfigItem
def createChainCreatorsPolicy(context, chainCreatePolicyName, chaindId, orgNames):
'Creates the chain Creator Policy with name'
directory = getDirectory(context)
bootstrapHelper = BootstrapHelper(chainId = chaindId)
    # This represents the domain of organizations which can create channels for the orderer
    # First create the org MSPPrincipal
# Collect the orgs from the table
mspPrincipalList = []
for org in [directory.getOrganization(orgName) for orgName in orgNames]:
mspPrincipalList.append(msp_principal_pb2.MSPPrincipal(PrincipalClassification=msp_principal_pb2.MSPPrincipal.Classification.Value("ByIdentity"),
Principal=crypto.dump_certificate(crypto.FILETYPE_ASN1, org.getSelfSignedCert())))
policyTypeSig = common_dot_configuration_pb2.Policy.PolicyType.Value("SIGNATURE")
chainCreatorsOrgsPolicySignedConfigItem = bootstrapHelper.encodePolicy(key=chainCreatePolicyName , policy=common_dot_configuration_pb2.Policy(type=policyTypeSig, policy=AuthDSLHelper.Envelope(signaturePolicy=AuthDSLHelper.NOutOf(0,[]), identities=mspPrincipalList).SerializeToString()))
# print("signed Config Item:\n{0}\n".format(chainCreationPolicyNamesSignedConfigItem))
#print("chain Creation orgs signed Config Item:\n{0}\n".format(chainCreatorsOrgsPolicySignedConfigItem))
return chainCreatorsOrgsPolicySignedConfigItem
def setOrdererBootstrapGenesisBlock(genesisBlock):
    'Responsible for setting the genesis block for the orderer nodes upon composition'
def broadcastCreateChannelConfigTx(context, composeService, chainId, configTxEnvelope, user):
dataFunc = lambda x: configTxEnvelope
user.broadcastMessages(context=context,numMsgsToBroadcast=1,composeService=composeService, chainID=chainId ,dataFunc=dataFunc, chainHeaderType=common_dot_common_pb2.CONFIGURATION_TRANSACTION)
def getArgsFromContextForUser(context, userName):
directory = getDirectory(context)
# Update the chaincodeSpec ctorMsg for invoke
args = []
if 'table' in context:
if context.table:
# There are function arguments
user = directory.getUser(context, userName)
# Allow the user to specify expressions referencing tags in the args list
pattern = re.compile('\{(.*)\}$')
for arg in context.table[0].cells:
m = pattern.match(arg)
if m:
# tagName reference found in args list
tagName = m.groups()[0]
# make sure the tagName is found in the users tags
assert tagName in user.tags, "TagName '{0}' not found for user '{1}'".format(tagName, user.getUserName())
args.append(user.tags[tagName])
else:
#No tag referenced, pass the arg
args.append(arg)
return args
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""百度个人云存储(PCS)Python SDK"""
__title__ = 'baidupcs'
__version__ = '0.3.2'
__author__ = 'mozillazg'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2014 mozillazg'
from .api import PCS, InvalidToken
|
import json
import sqlite3
import sys
FILE = sys.argv[1]
QUERY = """
SELECT name n, admin1 a, country c, round(latitude, 4) lat, round(longitude, 4) lon
FROM geoname
WHERE fcode like 'PPL%'
AND population > 5000;
"""
db = sqlite3.connect(FILE)
cur = db.cursor()
cur.execute(QUERY)
r = [dict((cur.description[i][0], value) for i, value in enumerate(row))
for row in cur.fetchall()]
print('gaz=%s' % (json.dumps(r, separators=(',', ':')),))
|
from config import CRYPTO_TIMESERIES_INDEX_NAME, DYNAMODB_TABLE_NAME
from infrastructure.dynamodb import CryptoMarketDataGateway
def create_market_data_gateway():
    return CryptoMarketDataGateway(DYNAMODB_TABLE_NAME, CRYPTO_TIMESERIES_INDEX_NAME)
|
from controllers.login_controller import LoginController
class AppController():
    def __init__(self, app, user) -> None:
        self.app = app
        self.user = user
        self.login_controller = LoginController(app)
        self.app.login_view.login_btn.fbind("on_press", self.on_login)
    def on_login(self, *args):
        username, password, password2 = self.app.login_view.get_data()
        result = self.login_controller.login(username, password, password2)
        if result:
            self.user.set_user(username, password)
            self.app.switch_to_welcome()
        else:
            self.app.login_view.set_status((1, 0, 0, 1), "Something is wrong")
    def start(self) -> None:
self.app.run()
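# Minimal wiring sketch, not part of the original module: the controller expects an app
# object exposing login_view, switch_to_welcome and run, plus a user model with set_user;
# both objects and the helper name below are assumptions for illustration.
def build_and_start(app, user):
    controller = AppController(app, user)
    controller.start()  # hands control to the app's main loop
    return controller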
|
# Link --> https://www.hackerrank.com/challenges/binary-search-tree-insertion/problem
# Code:
def insert(self, val):
if self.root is None:
self.root = Node(val)
else:
current = self.root
while True:
if current.info > val:
if current.left:
current = current.left
else:
current.left = Node(val)
break
else:
if current.right:
current = current.right
else:
current.right = Node(val)
break
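# A minimal sketch of the scaffold this snippet assumes (the judge normally supplies it):
# a Node holding info, left and right, and a tree owning root, onto which the insert
# function above is attached. Names follow the snippet; the demo values are arbitrary.
class Node:
    def __init__(self, info):
        self.info = info
        self.left = None
        self.right = None
class BinarySearchTree:
    def __init__(self):
        self.root = None
BinarySearchTree.insert = insert  # attach the snippet's insert as a method
if __name__ == '__main__':
    tree = BinarySearchTree()
    for value in (4, 2, 3, 1, 7, 6):
        tree.insert(value)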
|
import os
from typing import Type
from gps import *
from time import *
import time
import threading
from collections import namedtuple
#from flask import Flask
#from flask_restplus import Api, Resource
#
#flask_app = Flask(__name__)
#app = Api(app=flask_app)
gpsd = None # setting the global variable
gd = None
# os.system('clear') # clear the terminal (optional)
Gd = namedtuple( 'Gd', 'lat long time alt eps epx epv ept ms_speed climb track mode sats' )
#name_space = app.namespace('main', description='API Description (swagger) ')
# The REST resource below relies on the flask_restplus scaffolding commented out above
# (and would also need `import json`), so it is kept commented out as well.
#class MainClass(Resource):
#    def get(self):
#        return {
#            "status": "Got new GPS data:\n" +
#                      json.dumps(gd._asdict())
#        }
#    def post(self):
#        return {
#            "status": "Posted new GPS command/data"
#        }
class GpsPoller(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
global gpsd # bring it in scope
global gd
gpsd = gps(mode=WATCH_ENABLE) # starting the stream of info
self.current_value = None
self.running = True # setting the thread running to true
def run(self):
global gpsd
global gd
        while self.running:  # use the instance flag; cleared by the main thread on shutdown
gd = gpsd.next() # this will continue to loop and grab EACH set of gpsd info to clear the buffer
if __name__ == '__main__':
gpsp = GpsPoller() # create the thread
try:
gpsp.start() # start it up
# while True:
# It may take a second or two to get good data
# print( gpsd.fix.latitude,', ',gpsd.fix.longitude,' Time: ',gpsd.utc
# os.system('clear')
gd = Gd(
gpsd.fix.latitude, gpsd.fix.longitude,
            '%s + %s' % (gpsd.utc, gpsd.fix.time),
gpsd.fix.altitude,
gpsd.fix.eps, gpsd.fix.epx, gpsd.fix.epv, gpsd.fix.ept,
gpsd.fix.speed,
gpsd.fix.climb, gpsd.fix.track,
gpsd.fix.mode,
gpsd.satellites
)
print()
print(' GPS reading')
print('----------------------------------------')
print('latitude ', gpsd.fix.latitude)
print('longitude ', gpsd.fix.longitude)
print('time utc ', gpsd.utc, ' + ', gpsd.fix.time)
print('altitude (m)', gpsd.fix.altitude)
print('eps ', gpsd.fix.eps)
print('epx ', gpsd.fix.epx)
print('epv ', gpsd.fix.epv)
print('ept ', gpsd.fix.ept)
print('speed (m/s) ', gpsd.fix.speed)
print('climb ', gpsd.fix.climb)
print('track ', gpsd.fix.track)
print('mode ', gpsd.fix.mode)
print()
print('sats ', gpsd.satellites)
time.sleep(5) # set to whatever
except (KeyboardInterrupt, SystemExit): # when you press ctrl+c
print("\nKilling Thread...")
gpsp.running = False
gpsp.join() # wait for the thread to finish what it's doing
print("Done.\nExiting.")
|
"""Holds the genetic approach for the solution of the problem.
Classes:
GeneticAlgorithm
"""
import copy
import random
import statistics
from timeit import default_timer as timer
import delivery.algorithm.operation.mutation as mutations
from delivery.algorithm.algorithm import Algorithm
from delivery.algorithm.genetic.chromosome import Chromosome
from delivery.algorithm.genetic.crossover import OnePointCrossover, OrderCrossover
from delivery.algorithm.genetic.selection import RoulleteSelection, TournamentSelection
class GeneticAlgorithm(Algorithm):
"""Problem solver resorting to genetic algorithm.
In genetic algorithms, solutions (called chromosomes) suffer mutations and are crossover between themselves,
so that new solutions are generated.
Holds its simulation, time and iterations ceil, parent selection method, population size and if it is generational
or iterative.
"""
def __init__(self, simulation, time=None, iterations=None, max_improveless_iterations=20, selection_method=None,
crossover=None, population_size=30, generational=True, mutation_probability=0.2, log=True, save_results=False):
"""Instantiates a genetic algorithm.
...
Args:
simulation (Simulation): The problem simulation
time (real): The max time in seconds the algorithm should take
iterations (integer): The max number of iterations the algorithm should take
max_improveless_iterations (integer): The max number of iterations without improvement the algorithm should
take
            selection_method (SelectionMethod): The parent selection method, that is, the method used to select parents
                for crossover. Tournament selection is used if none is passed
crossover (Crossover): The crossover method to reproduce between two chromosomes. The default is the order
crossover
population_size (integer): The size of the population. The default is 30
            generational (boolean): Identifies if the offspring should replace the parents each iteration. If true,
                the number of offspring generated is the same as the size of the population and
                these offspring replace the old population. If false, when one offspring is
                generated, the weakest chromosome in the population is removed to give space to the
                new offspring. The default is true
"""
super().__init__(simulation, time, iterations, max_improveless_iterations)
self.selection_method = \
TournamentSelection(
population_size // 3) if selection_method is None else selection_method
self.crossover = OrderCrossover() if crossover is None else crossover
self.population_size = population_size
self.generational = generational
self.mutation_probability = mutation_probability
self.mutations = [mutations.exchange_positions, mutations.modify_drone]
self.log = log
self.save_results = save_results
if self.save_results:
self.results_file = open(
f'results_{str(timer())}.log', 'a')
if self.generational:
self.results_file.write(
'Generation;Generation max fitness;Generation min fitness;Generation average fitness;Global max fitness\n')
else:
self.results_file.write(
'Iteration;Fitness\n')
def run(self):
"""Runs the genetic algorithm.
...
Returns:
Chromosome: The best chromosome solution
"""
self.starting_time = timer()
# builds the initial solution
if self.log:
print('Building initial solution')
population = self.random_population()
best_solution = self.best_solution(population)
if self.log:
print('Starting reproduction')
self.starting_time = timer()
self.iterations = 0
self.improveless_iterations = 0
while not self.stop():
self.iterations += 1
if (self.generational):
# creates the required number of offsprings and replaces the old population
population = self.__new_generation(population)
new_best = self.best_solution(population)
if self.iterations % 10 == 0 and self.save_results:
average_fitness, min_fitness = self.__population_statistics(
population)
self.results_file.write(
f'{self.iterations};{new_best.fitness};{min_fitness};{average_fitness};{best_solution.fitness}\n')
if self.log:
print(
f'Generation {self.iterations} with max fitness {new_best.fitness} and global fitness {best_solution.fitness}')
if new_best.fitness > best_solution.fitness:
best_solution = new_best
self.improveless_iterations = 0
else:
self.improveless_iterations += 1
else:
# creates one offspring, adds it to the population, and removes the worst offspring in there
parent1, parent2 = self.selection_method.run(population)
offspring1, offspring2 = self.crossover.run(
parent1.solution, parent2.solution)
offspring1 = self.mutate(offspring1)
offspring2 = self.mutate(offspring2)
c1 = Chromosome(offspring1, self.evaluate(offspring1))
c2 = Chromosome(offspring2, self.evaluate(offspring2))
population.append(c1)
population.append(c2)
population.remove(
min(population, key=lambda chromosome: chromosome.fitness))
population.remove(
min(population, key=lambda chromosome: chromosome.fitness))
new_best = max(
[c1, c2], key=lambda chromosome: chromosome.fitness)
if self.log:
print(
f'Iteration {self.iterations} with max fitness {new_best.fitness} and global fitness {best_solution.fitness}')
if new_best.fitness > best_solution.fitness:
best_solution = new_best
self.improveless_iterations = 0
else:
self.improveless_iterations += 1
if self.iterations % 1000 == 0 and self.save_results:
self.results_file.write(
f'{self.iterations};{best_solution.fitness}\n')
# gets the best solution of the current population and returns it
return best_solution
def mutate(self, chromosome):
probability = random.uniform(0, 1)
if probability <= self.mutation_probability:
mutation = random.randrange(0, len(self.mutations))
mutation_function = self.mutations[mutation]
mutated_chromosome = chromosome
# 1 operation or 1 per 100 genes in a chromosome
max_op_count = max(1, len(chromosome) // 100)
op_count = random.randint(1, max_op_count)
for _ in range(0, op_count):
mutated_chromosome = mutation_function(
chromosome, self.simulation)
return mutated_chromosome
return chromosome
def random_population(self):
"""Builds a random population with chromosomes with random solutions.
...
Returns:
list[Chromosome]: A list of chromosomes with random solutions.
"""
res = []
# creates an initial solution
solution = self.random_solution()
chromosome = Chromosome(solution, self.evaluate(solution))
res.append(chromosome)
for _ in range(self.population_size - 1):
# creates new solutions based on the initial one
# changing its order and its drones
new_solution = []
for transportation in solution:
new_transportation = copy.copy(transportation)
new_transportation.drone = self.simulation.random_drone()
new_solution.append(new_transportation)
chromosome = Chromosome(new_solution, self.evaluate(new_solution))
res.append(chromosome)
solution = new_solution
return res
def best_solution(self, population):
"""Gets the best chromosome in a population
...
Args:
population (list[Chromosome]): The list of Chromosomes that constitute the population
Returns:
Chromosome: The best chromosome
"""
return max(population, key=lambda chromosome: chromosome.fitness)
def __population_statistics(self, population):
total_fitness = 0
min_fitness = population[0].fitness
for chromosome in population:
chromosome_fitness = chromosome.fitness
total_fitness += chromosome_fitness
min_fitness = min(min_fitness, chromosome_fitness)
return total_fitness / len(population), min_fitness
def __new_generation(self, population):
"""Gets a new generation using crossover between Chromosomes in a given population.
...
Args:
population (list[Chromosome]): The current population
Returns:
list[Chromosome]: A new Chromosome generation
"""
new_population = []
for _ in range(self.population_size // 2):
parent1, parent2 = self.selection_method.run(population)
offspring1, offspring2 = self.crossover.run(
parent1.solution, parent2.solution)
offspring1 = self.mutate(offspring1)
offspring2 = self.mutate(offspring2)
c1 = Chromosome(offspring1, self.evaluate(offspring1))
c2 = Chromosome(offspring2, self.evaluate(offspring2))
new_population.append(c1)
new_population.append(c2)
return new_population
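# Illustrative usage sketch, not part of the original module: `simulation` is assumed to be
# the problem Simulation built elsewhere in the delivery package; only the keyword arguments
# shown are taken from the constructor above, and the helper name is hypothetical.
def _example_run(simulation):
    solver = GeneticAlgorithm(simulation,
                              iterations=500,
                              max_improveless_iterations=50,
                              population_size=40,
                              generational=True,
                              mutation_probability=0.2,
                              log=False)
    best = solver.run()  # returns the fittest Chromosome found
    return best.solution, best.fitness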
|
from gevent import monkey; monkey.patch_all()
import bottle
from bottle import request, response
from datetime import datetime
from multiprocessing import Queue
from queue import Empty
import os
import gevent
import json
TEMPLATE_ROOT = os.path.join(os.path.dirname(__file__), 'client')
class WebServer(object):
def __init__(self, q=None, runner=None):
self.q = q or Queue()
self.runner = runner
self.latest_status = 'idle'
self.latest_result = {}
bottle.route('/')(self.handle_home)
bottle.route('/resources/<filepath:path>')(self.handle_serve_static)
bottle.route('/status')(self.handle_status)
bottle.route('/status/poll')(self.handle_status_poll)
bottle.route('/watch')(self.handle_watch)
bottle.route('/latest')(self.handle_latest)
bottle.route('/execute')(self.handle_execute)
def set_test_runner(self, runner):
self.runner = runner
def notify_test_executing(self):
self.q.put(('executing', None))
def notify_test_completed(self, result):
self.q.put(('idle', json.dumps(result, default=lambda o: str(o))))
def run(self):
bottle.run(app=bottle.app(), host='127.0.0.1', port=8000, quiet=True)
def handle_home(self):
return bottle.static_file('index.html', root=TEMPLATE_ROOT)
def handle_serve_static(self, filepath):
return bottle.static_file(filepath, root=os.path.join(TEMPLATE_ROOT, 'resources'))
    def handle_status(self):
        # return the most recently observed status rather than a hard-coded
        # value, so the UI sees 'idle' again once a run has completed
        return self.latest_status
def handle_status_poll(self):
timeout = int(request.query.get('timeout', 120000)) / 1000
try:
            status, result = self.q.get(timeout=max(timeout - 10, 1))
self.latest_status = status
if result is not None:
self.latest_result = json.loads(result)
return status
except Empty:
return 'idle'
def handle_watch(self):
return os.getcwd()
def handle_latest(self):
if 'tests' not in self.latest_result:
return json.dumps({})
revision = self.latest_result['created']
packages = {}
for test in self.latest_result['tests']:
filename = os.path.basename(test['path'])
dirname = os.path.dirname(test['path'])
if dirname not in packages:
packages[dirname] = dict(
PackageName=dirname,
Coverage=0.0,
Elapsed=0.0,
Outcome='passed',
BuildOutput='',
TestResults=[],
)
elapsed = test['setup']['duration'] + test['call']['duration'] + test['teardown']['duration']
logs = [x['msg'] for x in test['setup']['log'] + test['call']['log'] + test['teardown']['log']]
test_results = dict(
File=filename,
Line=test['lineno'],
Message='<br/>'.join(logs),
Passed=test['outcome']=='passed',
Skipped=test['outcome']=='skipped',
Stories=[],
TestName=test['domain'],
Error=test['call'].get('longrepr', ''),
Elapsed=elapsed,
)
packages[dirname]['Elapsed'] += elapsed
packages[dirname]['TestResults'].append(test_results)
return json.dumps(dict(
Packages=packages,
Paused=False,
Revision=revision,
))
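    # Shape of self.latest_result that handle_latest() relies on (an assumption
    # inferred from the keys read above; the pytest reporting hook that builds
    # and posts this dict is not part of this file):
    #
    #   {
    #       "created": "<revision or timestamp>",
    #       "tests": [
    #           {
    #               "path": "tests/test_example.py",
    #               "lineno": 12,
    #               "domain": "test_something",
    #               "outcome": "passed",
    #               "setup":    {"duration": 0.0, "log": []},
    #               "call":     {"duration": 0.1, "log": [{"msg": "..."}], "longrepr": ""},
    #               "teardown": {"duration": 0.0, "log": []},
    #           },
    #       ],
    #   }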
def handle_execute(self):
if self.runner is not None:
self.runner('web ui')
if __name__ == '__main__':
server = WebServer(Queue())
server.run()
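
# Wiring sketch (assumptions: `run_tests` is any callable that triggers a test
# run, and some external process builds `result` in the shape documented above;
# neither is defined in this file):
#
#   server = WebServer()
#   server.set_test_runner(lambda source: run_tests())
#   # from the process executing the tests:
#   server.notify_test_executing()          # /status/poll clients see 'executing'
#   result = {"created": "rev-1", "tests": []}
#   server.notify_test_completed(result)    # clients see 'idle', then GET /latest
#   server.run()                            # serves on http://127.0.0.1:8000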
|
# Copyright (c) 2013 - 2019 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
from unittest import TestCase
import requests
import requests_mock
from yawast.scanner.plugins.http import http_basic
from yawast.scanner.plugins.http.http_basic import get_cookie_issues
class TestGetCookieIssues(TestCase):
def test__get_cookie_issues_no_sec_no_tls(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "sessionid=38afes7a8; HttpOnly; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(0, len(res))
def test__get_cookie_issues_sec_no_tls(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "sessionid=38afes7a8; HttpOnly; Secure; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Cookie Secure Flag Invalid (over HTTP)", res[0].message)
def test__get_cookie_issues_no_sec_ssn(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "https://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "sessionid=38afes7a8; HttpOnly; SameSite=None; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(2, len(res))
self.assertIn("Cookie Missing Secure Flag", res[0].message)
self.assertIn(
"Cookie SameSite=None Flag Invalid (without Secure flag)", res[1].message
)
def test__get_cookie_issues_ssn(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "https://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "sessionid=38afes7a8; HttpOnly; Secure; SameSite=None; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
def test__get_cookie_issues_no_sec(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "https://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "sessionid=38afes7a8; HttpOnly; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Cookie Missing Secure Flag", res[0].message)
def test__get_cookie_issues_no_ho(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={"Set-Cookie": "sessionid=38afes7a8; SameSite=Lax; Path=/"},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Cookie Missing HttpOnly Flag", res[0].message)
def test__get_cookie_issues_no_ss(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "https://example.com"
m.get(
url,
text="body",
headers={"Set-Cookie": "sessionid=38afes7a8; Secure; HttpOnly; Path=/"},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Cookie Missing SameSite Flag", res[0].message)
def test__get_cookie_bigip_1(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "BIGipServerWEB=2263487148.3013.0000; HttpOnly; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Big-IP Internal IP Address Disclosure", res[0].message)
def test__get_cookie_bigip_2(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "BIGipServerWEB=rd5o00000000000000000000ffffc0000201o80; HttpOnly; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Big-IP Internal IP Address Disclosure", res[0].message)
def test__get_cookie_bigip_3(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "BIGipServerWEB=vi20010112000000000000000000000030.20480; HttpOnly; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Big-IP Internal IP Address Disclosure", res[0].message)
def test__get_cookie_bigip_4(self):
http_basic.reset()
with requests_mock.Mocker() as m:
url = "http://example.com"
m.get(
url,
text="body",
headers={
"Set-Cookie": "BIGipServerWEB=rd3o20010112000000000000000000000030o80; HttpOnly; SameSite=Lax; Path=/"
},
)
resp = requests.get(url)
res = get_cookie_issues(resp, url)
self.assertEqual(1, len(res))
self.assertIn("Big-IP Internal IP Address Disclosure", res[0].message)
|
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name="allreverso",
version="1.2.7",
description="A simple package to handle allreverso.net services (translation, voice, dictionary etc.).",
long_description=long_description,
long_description_content_type="text/markdown",
author="PetitPotiron",
packages=["allreverso"],
install_requires=["requests", "bs4", "lxml"],
license="Apache 2.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
project_urls={
'Homepage': 'https://github.com/PetitPotiron/allreverso',
}
)
|
import train
while True:
t = train.Train(1)
    t.main()
|
from django.apps import AppConfig
class AsynctasksConfig(AppConfig):
name = 'asyncTasks'
|
# -*- coding: utf-8 -*-
from timeset.timeset import TimeSet, TimeInterval
__all__= [
'TimeInterval',
'TimeSet'
]
|
import logging
# Just sets up logging, nothing to do with the networking aspect of this library
logger = logging.getLogger("Penguin")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s", "%I:%M:%S")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
|
# Copyright 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.forms import Widget
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
class JsonPairInputs(Widget):
"""A widget that displays JSON Key Value Pairs
as a list of text input box pairs
    Usage (in forms.py):
examplejsonfield = forms.CharField(label = "Example JSON Key Value Field", required = False,
widget = JsonPairInputs(val_attrs={'size':35},
key_attrs={'class':'large'}))
"""
def __init__(self, *args, **kwargs):
"""A widget that displays JSON Key Value Pairs
as a list of text input box pairs
kwargs:
key_attrs -- html attributes applied to the 1st input box pairs
val_attrs -- html attributes applied to the 2nd input box pairs
"""
self.key_attrs = {}
self.val_attrs = {}
if "key_attrs" in kwargs:
self.key_attrs = kwargs.pop("key_attrs")
if "val_attrs" in kwargs:
self.val_attrs = kwargs.pop("val_attrs")
Widget.__init__(self, *args, **kwargs)
def render(self, name, value, attrs=None, renderer=None):
"""Renders this widget into an html string
args:
name (str) -- name of the field
value (str) -- a json string of a two-tuple list automatically passed in by django
attrs (dict) -- automatically passed in by django (unused in this function)
"""
if (not value) or value.strip() == "":
value = '{"":""}'
# twotuple = json.loads(force_unicode(value))
twotuple = json.loads(value)
if isinstance(twotuple, dict):
twotuple = [(k, v,) for k, v in twotuple.items()]
if not twotuple:
twotuple = [("", "")]
ret = ""
if value and len(value) > 0:
for k, v in twotuple:
ctx = {
"key": k,
"value": v,
"fieldname": name,
"key_attrs": flatatt(self.key_attrs),
"val_attrs": flatatt(self.val_attrs),
}
ret += (
"""
<div class="form-group" id="">
<div class="col-md-4">
<input placeholder="Key" class="form-control" type="text" name="json_key[%(fieldname)s]" value="%(key)s" %(key_attrs)s>
</div>
<div class="col-md-1" style="font-size: 2em; text-align: center;">
=
</div>
<div class="col-md-5">
<input placeholder="Value" class="form-control" type="text" name="json_value[%(fieldname)s]" value="%(value)s" %(val_attrs)s>
</div>
<div class="col-md-2 btn-group" role="group" aria-label="...">
<a class="btn btn-large btn-success">
<i class="glyphicon glyphicon-plus"></i>
</a>
<a class="btn btn-large btn-danger">
<i class="glyphicon glyphicon-minus"></i>
</a>
</div>
<div class="clearfix"></div>
</div>
"""
% ctx
)
ret = '<span id="metadata_fields">' + ret + "</span>"
return mark_safe(ret)
def value_from_datadict(self, data, files, name):
"""
Returns the json representation of the key-value pairs
sent in the POST parameters
args:
data (dict) -- request.POST or request.GET parameters
files (list) -- request.FILES
name (str) -- the name of the field associated with this widget
"""
jsontext = ""
if "json_key[%s]" % name in data and "json_value[%s]" % name in data:
keys = data.getlist("json_key[%s]" % name)
values = data.getlist("json_value[%s]" % name)
twotuple = []
for key, value in zip(keys, values):
if len(key) > 0:
twotuple += [(key, value)]
jsontext = json.dumps(twotuple)
return jsontext
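
# Usage sketch (assumption: a forms.py elsewhere in the project; the form and
# field names below are illustrative only):
#
#   from django import forms
#
#   class MetadataForm(forms.Form):
#       metadata = forms.CharField(
#           label="Metadata",
#           required=False,
#           widget=JsonPairInputs(key_attrs={"class": "large"},
#                                 val_attrs={"size": 35}),
#       )
#
#   # On POST, value_from_datadict() reads json_key[metadata] / json_value[metadata]
#   # from request.POST and hands the form a JSON string such as
#   # '[["env", "prod"], ["owner", "ops"]]'.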
|