Dataset schema (29 columns; "nullable" marks columns shown with ⌀ in the original header):

| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 251 |
| max_stars_repo_name | string | lengths 4 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 251 |
| max_issues_repo_name | string | lengths 4 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 251 |
| max_forks_repo_name | string | lengths 4 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
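Each record below follows this schema, with the file text in `content` and repository metadata alongside it. As a minimal sketch of how such a dump is typically read programmatically (assumptions: it is a Hugging Face `datasets`-style export, and "<dataset-name>" is a placeholder for the actual dataset id):

from datasets import load_dataset

# Stream the split so the multi-megabyte "content" strings are not all loaded at once.
rows = load_dataset("<dataset-name>", split="train", streaming=True)
for row in rows:
    # Each row pairs the raw file text with its repository metadata.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    source_code = row["content"]
    break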
hexsha: 0a247bd11d82f9ea0cd74cd38836b820c3903839 | size: 5,048 | ext: py | lang: Python
max_stars: interpretable_ddts/runfiles/gym_runner.py | CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020 @ a7fde4d2a7d70477b2e6c96b140f8c6587f78791 | ["MIT"] | count: 5 | 2021-08-11T14:58:36.000Z to 2022-02-12T06:12:19.000Z
max_issues: interpretable_ddts/runfiles/gym_runner.py | CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020 @ a7fde4d2a7d70477b2e6c96b140f8c6587f78791 | ["MIT"] | count: null
max_forks: interpretable_ddts/runfiles/gym_runner.py | CORE-Robotics-Lab/Interpretable_DDTS_AISTATS2020 @ a7fde4d2a7d70477b2e6c96b140f8c6587f78791 | ["MIT"] | count: 4 | 2020-10-21T03:57:52.000Z to 2021-06-28T08:08:05.000Z
content:
# Created by Andrew Silva on 8/28/19
import gym
import numpy as np
import torch
from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward
import torch.multiprocessing as mp
import argparse
import copy
import random
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL ", type=int, default=8)
parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP ", type=int, default=0)
parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
parser.add_argument("-gpu", help="run on GPU?", action='store_true')
args = parser.parse_args()
AGENT_TYPE = args.agent_type # 'ddt', 'mlp'
    NUM_EPS = args.episodes  # num episodes, default 2000
ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart'
USE_GPU = args.gpu # Applies for 'prolo' only. use gpu? Default false
if ENV_TYPE == 'lunar':
init_env = gym.make('LunarLander-v2')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
elif ENV_TYPE == 'cart':
init_env = gym.make('CartPole-v1')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
else:
raise Exception('No valid environment selected')
print(f"Agent {AGENT_TYPE} on {ENV_TYPE} ")
# mp.set_start_method('spawn')
mp.set_sharing_strategy('file_system')
for i in range(5):
bot_name = AGENT_TYPE + ENV_TYPE
if USE_GPU:
bot_name += 'GPU'
if AGENT_TYPE == 'ddt':
policy_agent = DDTAgent(bot_name=bot_name,
input_dim=dim_in,
output_dim=dim_out,
rule_list=False,
num_rules=args.num_leaves)
elif AGENT_TYPE == 'mlp':
policy_agent = MLPAgent(input_dim=dim_in,
bot_name=bot_name,
output_dim=dim_out,
num_hidden=args.num_hidden)
else:
raise Exception('No valid network selected')
reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
avg_line_length: 40.709677 | max_line_length: 112 | alphanum_fraction: 0.621434

hexsha: 0a263ee52f1bcf865cb343ad7cbe07411cfb3a5e | size: 1,534 | ext: py | lang: Python
max_stars: Week 08/tw10_words_by_prefix.py | andrewn488/OMSBA-5061 @ 8e57fff45d8965b0423a6fe338bd74cedfe94ea0 | ["MIT"] | count: null
max_issues: Week 08/tw10_words_by_prefix.py | andrewn488/OMSBA-5061 @ 8e57fff45d8965b0423a6fe338bd74cedfe94ea0 | ["MIT"] | count: null
max_forks: Week 08/tw10_words_by_prefix.py | andrewn488/OMSBA-5061 @ 8e57fff45d8965b0423a6fe338bd74cedfe94ea0 | ["MIT"] | count: 1 | 2022-02-07T02:42:43.000Z to 2022-02-07T02:42:43.000Z
content:
""" TW10: Words by Prefix
Team: Tam Tamura, Andrew Nalundasan
For: OMSBA 2061, Seattle University
Date: 11/3/2020
"""
question_2 = ['able', 'ability', 'apple', 'tryst', 'trial', 'tremendous', 'tree']
my_list = []
for elem in question_2:
prefix = elem[:2]
my_list.append(prefix)
print(my_list)
avg_line_length: 24.741935 | max_line_length: 82 | alphanum_fraction: 0.544329

hexsha: 0a26a5869fd7404e249d795b4a225c3eca2ac49a | size: 2,683 | ext: py | lang: Python
max_stars: openff/bespokefit/executor/services/qcgenerator/cache.py | openforcefield/bespoke-f @ 27b072bd09610dc8209429118d739e1f453edd61 | ["MIT"] | count: 12 | 2020-08-28T20:49:00.000Z to 2021-11-17T08:50:32.000Z
max_issues: openff/bespokefit/executor/services/qcgenerator/cache.py | openforcefield/bespoke-f @ 27b072bd09610dc8209429118d739e1f453edd61 | ["MIT"] | count: 95 | 2020-02-19T18:40:54.000Z to 2021-12-02T10:52:23.000Z
max_forks: openff/bespokefit/executor/services/qcgenerator/cache.py | openforcefield/openff-bespokefit @ 85c92a51055a5a82e5d50fee1668a7de4ce2b1d4 | ["MIT"] | count: 3 | 2021-04-01T04:22:49.000Z to 2021-04-13T03:19:10.000Z
content:
import hashlib
from typing import TypeVar, Union
import redis
from openff.toolkit.topology import Molecule
from openff.bespokefit.executor.services.qcgenerator import worker
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.bespokefit.utilities.molecule import canonical_order_atoms
_T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask)
def cached_compute_task(
task: Union[HessianTask, OptimizationTask, Torsion1DTask],
redis_connection: redis.Redis,
) -> str:
"""Checks to see if a QC task has already been executed and if not send it to a
worker.
"""
if isinstance(task, Torsion1DTask):
compute = worker.compute_torsion_drive
elif isinstance(task, OptimizationTask):
compute = worker.compute_optimization
elif isinstance(task, HessianTask):
compute = worker.compute_hessian
else:
raise NotImplementedError()
# Canonicalize the task to improve the cache hit rate.
task = _canonicalize_task(task)
task_hash = hashlib.sha512(task.json().encode()).hexdigest()
task_id = redis_connection.hget("qcgenerator:task-ids", task_hash)
if task_id is not None:
return task_id.decode()
task_id = compute.delay(task_json=task.json()).id
redis_connection.hset("qcgenerator:types", task_id, task.type)
# Make sure to only set the hash after the type is set in case the connection
# goes down before this information is entered and subsequently discarded.
redis_connection.hset("qcgenerator:task-ids", task_hash, task_id)
return task_id
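# A minimal usage sketch (assumptions: a reachable local Redis instance and a task
# object built elsewhere; the task constructor arguments are not shown in this extract):
#
#     connection = redis.Redis(host="localhost", port=6379)
#     task = ...  # a HessianTask, OptimizationTask, or Torsion1DTask instance
#     task_id = cached_compute_task(task, connection)
#     print(task_id)  # id of the cached or newly dispatched compute job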
avg_line_length: 31.197674 | max_line_length: 87 | alphanum_fraction: 0.706299

hexsha: 0a277a87fbb9f9430d9ecdf658e9964b1157dc17 | size: 3,951 | ext: py | lang: Python
max_stars: advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs @ 5477750d269cb703ce47e35a1c13749fc88f3f6f | ["Apache-2.0"] | count: 6 | 2015-07-06T01:10:08.000Z to 2016-12-21T15:42:07.000Z
max_issues: advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs @ 5477750d269cb703ce47e35a1c13749fc88f3f6f | ["Apache-2.0"] | count: 4 | 2015-08-25T06:32:36.000Z to 2016-09-07T07:01:34.000Z
max_forks: advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs @ 5477750d269cb703ce47e35a1c13749fc88f3f6f | ["Apache-2.0"] | count: 14 | 2015-03-28T05:45:58.000Z to 2017-02-14T02:22:09.000Z
content:
'''Copyright Gigaspaces, 2017, All Rights Reserved'''
from cloudify.plugins import lifecycle
OP_START = 'hacker.interfaces.lifecycle.start'
OP_STOP = 'hacker.interfaces.lifecycle.stop'
OP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots'
OP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots'
REQUIRED_OPS = set([OP_START, OP_SS_C, OP_SS_D, OP_STOP])
def build_instance_sequence(instance, operation,
state_start=None, state_end=None):
'''
Builds sequenced subgraph tasks for an instance
.. note::
The sequence will not be built if the instance provided
does not have a node with an operation defined in the
operation parameter.
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param str operation:
Node (lifecycle) operation to execute
:param str state_start:
Verb to describe operation start
    :param str state_end:
Verb to describe operation finish
'''
tasks = list()
# Only build the sequence if the node operation exists
if operation not in instance.node.operations:
return tasks
# Add task starting state
if state_start:
tasks.append(instance.send_event('%s host' % state_start))
tasks.append(instance.set_state(state_start.lower()))
# Add task operation
tasks.append(instance.execute_operation(operation))
# Add task ended state
if state_end:
tasks.append(instance.send_event('%s host' % state_end))
tasks.append(instance.set_state(state_end.lower()))
return tasks
def build_instance_subgraph(instance, graph):
'''
Builds a subgraph for an instance
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param `TaskDependencyGraph` graph:
Task graph to create sequences from
'''
# Init a "stop instance" subgraph
sg_stop = graph.subgraph('stop_subgraph')
seq_stop = sg_stop.sequence()
seq_stop.add(*build_instance_sequence(
instance, OP_STOP, 'Stopping', 'Stopped'))
# Init a "recreate snapshots" subgraph
sg_snap = graph.subgraph('snapshot_subgraph')
seq_snap = sg_snap.sequence()
if OP_SS_D in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_D))
if OP_SS_C in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_C))
# Init a "start instance" subgraph
    sg_start = graph.subgraph('start_subgraph')
seq_start = sg_start.sequence()
seq_start.add(*build_instance_sequence(
instance, OP_START, 'Starting', 'Started'))
# Create subgraph dependencies
graph.add_dependency(sg_snap, sg_stop)
graph.add_dependency(sg_start, sg_snap)
def refresh_snapshots(ctx, **_):
'''
Executes a complex, graph-based set of lifecycle events
to stop all host (compute) instances, delete all
existing instance snapshots, take new snapshots
of all attached volumes, and start the instances
back up when complete.
'''
graph = ctx.graph_mode()
# Find all compute hosts and build a sequence graph
for node in ctx.nodes:
if not REQUIRED_OPS.issubset(node.operations):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node "%s" because '
'it does not have all required operations defined' % node.id)
continue
# Iterate over each node instance
for instance in node.instances:
if not lifecycle.is_host_node(instance):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node instance '
'"%s" because it is not a compute host' % instance.id)
continue
build_instance_subgraph(instance, graph)
# Execute the sequences
return graph.execute()
avg_line_length: 37.628571 | max_line_length: 77 | alphanum_fraction: 0.679069

hexsha: 0a28f4c1d95b682b9a50e90e2f39fe8345b14eab | size: 33,404 | ext: py | lang: Python
max_stars: File Transfer/Flyter/flyter.py | CryptoNyxz/Miscellaneous-Tools @ 797ea04d7c369469ab3d2a1ae2838c4a7b7b9c02 | ["MIT"] | count: null
max_issues: File Transfer/Flyter/flyter.py | CryptoNyxz/Miscellaneous-Tools @ 797ea04d7c369469ab3d2a1ae2838c4a7b7b9c02 | ["MIT"] | count: null
max_forks: File Transfer/Flyter/flyter.py | CryptoNyxz/Miscellaneous-Tools @ 797ea04d7c369469ab3d2a1ae2838c4a7b7b9c02 | ["MIT"] | count: null
content:
"""
Flyter
Tool for transferring files on the same network using raw sockets.
Doesn't use encryption.
"""
__version__ = (0, 0, 0)
__author__ = "CryptoNyxz"
__license__ = """
MIT License
Copyright (c) 2021 Jaymund Cyrus F. Floranza
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from argparse import ArgumentParser
from base64 import b64encode
from datetime import timedelta
from math import log
from os import altsep, sep, \
mkdir, stat, unlink
from os.path import dirname, exists, join
from random import randint
from secrets import token_bytes
from shutil import get_terminal_size
from socket import \
socket, error, timeout, \
ntohs, ntohl, htons, htonl, \
gethostname, \
AF_INET, SOCK_STREAM
from threading import Thread
from time import time
from warnings import warn
from sys import argv, exit, version_info
if version_info < (3, 6):
    warn('[!] Some features may not be compatible with the version of your '
         'python interpreter')
FROMTERMINAL = False
# Utility Functions
def random_port(host):
"""Return a random available TCP port."""
while True:
        port = randint(10_000, 65535)
with socket(AF_INET, SOCK_STREAM) as sock:
try:
sock.bind((host, port))
except error:
continue
else:
return port
def printerror(errormsg):
"""Print an error message."""
global FROMTERMINAL
if FROMTERMINAL:
print(f'\n[x] {errormsg}')
        exit(-1)
else:
warn(errormsg)
def printalert(alert):
"""Print an alert message."""
global FROMTERMINAL
print(f'[!] {alert}')
def int_to_bytes_s(integer):
"""Convert 16 - bit integer to bytes for packing."""
res = ntohs(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_s(byteseq):
"""Convert byte sequence to 16 - but integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htons(res)
def int_to_bytes_l(integer):
"""Convert 32 - but integer to bytes for packing."""
res = ntohl(integer)
res = hex(res)[2:]
res = '0'*(len(res) % 2) + res
return bytes.fromhex(res)
def bytes_to_int_l(byteseq):
"""Convert byte sequence to 32 - but integer for unpacking."""
res = bytes.hex(byteseq)
res = int(res, 16)
return htonl(res)
def pack_str(string):
"""Pack a string into a byte sequence."""
return string.encode()
def unpack_str(byteseq):
"""Unpack a byte sequence into a string."""
return byteseq.decode()
# Utility Classes
# Flyter Classes
# Simplified Functions
def send(ip_address, port, filepath):
"""
Send file to receiver on the same network.
Parameters
----------
ip_address : str
The target receiver's IP address.
port : int
The target receiver's main TCP port.
filepath : str
The path to the file to be sent.
"""
sender = FlyterSender(ip_address, port)
sender.recv_param_set()
return sender.send_file(filepath)
def receive(host_ip_address, port, workers=1):
"""
Receive a file from sender on the same network.
Parameters
----------
host_ip_address : str
The receiver's host IP address.
port : int
The receiver's host port to listen on.
workers : :obj:`int`, optional
The number of workers to use.
"""
receiver = FlyterReciever(host_ip_address, port, workers)
receiver.send_param_set()
receiver.recv_file()
if __name__ == '__main__':
parser = ArgumentParser(
prog="Flyter",
epilog="See '<command> --help' to read about a specific sub-command."
)
subparsers = parser.add_subparsers(
dest="action",
help="The action to be performed"
)
send_parser = subparsers.add_parser("send")
recv_parser = subparsers.add_parser("recv")
send_parser.add_argument('-i', '--ip',
required=True,
help="Target receiver's IP address")
send_parser.add_argument('-p', '--port',
type=int,
required=True,
help="Target receiver's TCP port number")
send_parser.add_argument('-f', '--file',
required=True,
help="Path to the file to be sent")
recv_parser.add_argument('-i', '--ip',
required=True,
help="Host IP address")
recv_parser.add_argument('-p', '--port',
type=int,
required=True,
help="TCP port to listen on")
recv_parser.add_argument('-w', '--workers',
type=int,
default=1,
help="TCP port to listen on")
if len(argv) > 1:
FROMTERMINAL = True
args = parser.parse_args()
if args.action == "send":
send(args.ip, args.port, args.file)
elif args.action == "recv":
receive(args.ip, args.port, args.workers)
else:
parser.print_help()
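# Example invocations (a sketch derived from the argument definitions above):
#   python flyter.py recv -i 192.168.1.10 -p 5000 -w 4
#   python flyter.py send -i 192.168.1.10 -p 5000 -f path/to/file.bin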
avg_line_length: 31.542965 | max_line_length: 79 | alphanum_fraction: 0.55176

hexsha: 0a29f1417c7897dd77e238158ebcffa7aedd19a4 | size: 14,751 | ext: py | lang: Python
max_stars: tests/test_modeling_tf_led.py | patelrajnath/transformers @ 98afe9d7c94a840d4b30c7eb76f9bfe570d2ed50 | ["Apache-2.0"] | count: null
max_issues: tests/test_modeling_tf_led.py | patelrajnath/transformers @ 98afe9d7c94a840d4b30c7eb76f9bfe570d2ed50 | ["Apache-2.0"] | count: null
max_forks: tests/test_modeling_tf_led.py | patelrajnath/transformers @ 98afe9d7c94a840d4b30c7eb76f9bfe570d2ed50 | ["Apache-2.0"] | count: null
content:
# coding=utf-8
# Copyright Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if tf.debugging.assert_near(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
TOLERANCE = 1e-4
avg_line_length: 42.266476 | max_line_length: 119 | alphanum_fraction: 0.686055

hexsha: 0a2a4e7e62506f1bbd8360775e618cece1d71944 | size: 5,239 | ext: py | lang: Python
max_stars: src/wann_genetic/individual/numpy/ffnn.py | plonerma/wann-genetic @ c4a8a1db81665b2549994d615e1d347dbe00226a | ["MIT"] | count: null
max_issues: src/wann_genetic/individual/numpy/ffnn.py | plonerma/wann-genetic @ c4a8a1db81665b2549994d615e1d347dbe00226a | ["MIT"] | count: null
max_forks: src/wann_genetic/individual/numpy/ffnn.py | plonerma/wann-genetic @ c4a8a1db81665b2549994d615e1d347dbe00226a | ["MIT"] | count: null
content:
import numpy as np
import sklearn
import logging
from wann_genetic.individual.network_base import BaseFFNN
def softmax(x, axis=-1):
"""Compute softmax values for each sets of scores in x.
Returns:
softmax - softmax normalized in dim axis
"""
e_x = np.exp(x - np.expand_dims(np.max(x,axis=axis), axis=axis))
s = (e_x / np.expand_dims(e_x.sum(axis=-1), axis=axis))
return s
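# Quick example (a sketch): each row of the result sums to 1 along the softmax axis.
#   >>> softmax(np.array([[1.0, 2.0, 3.0], [1.0, 1.0, 1.0]])).sum(axis=-1)
#   array([1., 1.])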
def apply_act_function(available_funcs, selected_funcs, x=None):
"""Apply the activation function of the selected nodes to their sums.
    This fulfils the same function as the
:class:`wann_genetic.individual.torch.ffn.MultiActivationModule`.
"""
if x is not None:
result = np.empty(x.shape)
for i, func in enumerate(selected_funcs):
assert func < len(available_funcs)
result[..., i] = available_funcs[func][1](x[..., i])
return result
else:
return np.array([ # return function names
available_funcs[func][0] for func in selected_funcs
])
avg_line_length: 33.158228 | max_line_length: 86 | alphanum_fraction: 0.601642

hexsha: 0a2acc58ab0f0250a6af12c5eb3f75f975289067 | size: 14,665 | ext: py | lang: Python
max_stars: common/tests/util.py | uktrade/tamato @ 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | ["MIT"] | count: 14 | 2020-03-25T11:11:29.000Z to 2022-03-08T20:41:33.000Z
max_issues: common/tests/util.py | uktrade/tamato @ 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | ["MIT"] | count: 352 | 2020-03-25T10:42:09.000Z to 2022-03-30T15:32:26.000Z
max_forks: common/tests/util.py | uktrade/tamato @ 4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca | ["MIT"] | count: 3 | 2020-08-06T12:22:41.000Z to 2022-01-16T11:51:12.000Z
content:
import contextlib
from datetime import date
from datetime import datetime
from datetime import timezone
from functools import wraps
from io import BytesIO
from itertools import count
from typing import Any
from typing import Dict
from typing import Sequence
import pytest
from dateutil.parser import parse as parse_date
from dateutil.relativedelta import relativedelta
from django import forms
from django.core.exceptions import ValidationError
from django.template.loader import render_to_string
from django.urls import reverse
from freezegun import freeze_time
from lxml import etree
from common.models.records import TrackedModel
from common.renderers import counter_generator
from common.serializers import validate_taric_xml_record_order
from common.util import TaricDateRange
from common.util import get_accessor
from common.util import get_field_tuple
INTERDEPENDENT_IMPORT_IMPLEMENTED = True
UPDATE_IMPORTER_IMPLEMENTED = True
EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False
COMMODITIES_IMPLEMENTED = True
MEURSING_TABLES_IMPLEMENTED = False
PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False
UTC = timezone.utc
requires_commodities = pytest.mark.skipif(
not COMMODITIES_IMPLEMENTED,
reason="Commodities not implemented",
)
requires_export_refund_nomenclature = pytest.mark.skipif(
not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED,
reason="Export refund nomenclature not implemented",
)
requires_meursing_tables = pytest.mark.skipif(
not MEURSING_TABLES_IMPLEMENTED,
reason="Meursing tables not implemented",
)
requires_partial_temporary_stop = pytest.mark.skipif(
not PARTIAL_TEMPORARY_STOP_IMPLEMENTED,
reason="Partial temporary stop not implemented",
)
requires_interdependent_import = pytest.mark.skipif(
not INTERDEPENDENT_IMPORT_IMPLEMENTED,
reason="Interdependent imports not implemented",
)
requires_update_importer = pytest.mark.skipif(
not UPDATE_IMPORTER_IMPLEMENTED,
reason="Requires Updating importers to be implemented",
)
def make_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are duplicates of each
other and returns the record created last."""
existing = factory.create()
# allow overriding identifying_fields
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
return factory.create(
**dict(get_field_tuple(existing, field) for field in identifying_fields)
)
def make_non_duplicate_record(factory, identifying_fields=None):
"""Creates two records using the passed factory that are not duplicates of
each other and returns the record created last."""
existing = factory.create()
not_duplicate = factory.create()
if identifying_fields is None:
identifying_fields = list(factory._meta.model.identifying_fields)
assert any(
get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f)
for f in identifying_fields
)
return not_duplicate
def get_checkable_data(model: TrackedModel, ignore=frozenset()):
"""
Returns a dict representing the model's data ignoring any automatically set
fields and fields with names passed to `ignore`.
The returned data will contain the identifying fields for any linked
models rather than internal PKs.
For example:
get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"})
# {
# "description": "My sample footnote text",
# "described_footnote": {
# "footnote_type__footnote_type_id": "FN"
# "footnote_id": "123",
# },
# }
"""
checked_field_names = {f.name for f in model.copyable_fields} - ignore
data = {
name: getattr(model, get_accessor(model._meta.get_field(name)))
for name in checked_field_names
}
identifying_fields = {
name: data[name].get_identifying_fields()
for name in checked_field_names
if hasattr(data[name], "get_identifying_fields")
}
data.update(identifying_fields)
return data
def assert_records_match(
expected: TrackedModel,
imported: TrackedModel,
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported model is the same
as the data in the expected model.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = get_checkable_data(expected, ignore=ignore)
imported_data = get_checkable_data(imported, ignore=ignore)
assert expected_data == imported_data
def assert_many_records_match(
expected: Sequence[TrackedModel],
imported: Sequence[TrackedModel],
ignore=frozenset(),
):
"""
Asserts that every value for every field in the imported models is the same
as the data in the expected models, and that the count of both is equal.
System fields that will change from model to model are not checked. Any
field names given to `ignore` will also not be checked.
"""
expected_data = [get_checkable_data(e, ignore=ignore) for e in expected]
imported_data = [get_checkable_data(i, ignore=ignore) for i in imported]
assert expected_data == imported_data
_transaction_counter = count(start=1)
def taric_xml_record_codes(xml):
"""Yields tuples of (record_code, subrecord_code)"""
records = xml.xpath(".//*[local-name() = 'record']")
codes = etree.XPath(
".//*[local-name()='record.code' or local-name()='subrecord.code']/text()",
)
return [tuple(codes(record)) for record in records]
def only_applicable_after(cutoff):
"""
Decorator which asserts that a test fails after a specified cutoff date.
:param cutoff: A date string, or datetime object before which the test should fail.
"""
cutoff = parse_date(cutoff)
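    # The inner decorator is missing from this extract; the following is a sketch of what
    # it plausibly does (an assumption based on the docstring and the imports of `wraps`,
    # `freeze_time`, `relativedelta` and `pytest` above): run the test normally, then
    # re-run it frozen to just before the cutoff date and expect it to fail there.
    def decorator(fn):
        @wraps(fn)
        def do_test(*args, **kwargs):
            # The test should pass when run at the current date.
            fn(*args, **kwargs)
            # The test should fail when run just before the cutoff date.
            with freeze_time(cutoff + relativedelta(days=-1)):
                with pytest.raises(AssertionError):
                    fn(*args, **kwargs)
            return True
        return do_test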
return decorator
def validity_period_post_data(start: date, end: date) -> Dict[str, int]:
"""
Construct a POST data fragment for the validity period start and end dates
of a ValidityPeriodForm from the given date objects, eg:
>>> validity_period_post_data(
>>> datetime.date(2021, 1, 2),
>>> datetime.date(2022, 3, 4),
>>> )
{
"start_date_0": 1,
"start_date_1": 2,
"start_date_2": 2021,
"end_date_0": 4,
"end_date_1": 3,
"end_date_2": 2022,
}
"""
return {
f"{name}_{i}": part
for name, date in (("start_date", start), ("end_date", end))
for i, part in enumerate([date.day, date.month, date.year])
}
def get_form_data(form: forms.ModelForm) -> Dict[str, Any]:
"""Returns a dictionary of the fields that the form will put onto a page and
their current values, taking account of any fields that have sub-fields and
hence result in multiple HTML <input> objects."""
data = {**form.initial}
for field in form.rendered_fields:
value = data[field] if field in data else form.fields[field].initial
if hasattr(form.fields[field].widget, "decompress"):
# If the widget can be decompressed, then it is not just a simple
# value and has some internal structure. So we need to generate one
# form item per decompressed value and append the name with _0, _1,
# etc. This mirrors the MultiValueWidget in django/forms/widgets.py.
if field in data:
del data[field]
value = form.fields[field].widget.decompress(value)
data.update(
**{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
)
elif value is not None:
data.setdefault(field, value)
return data
avg_line_length: 31.268657 | max_line_length: 93 | alphanum_fraction: 0.633277

hexsha: 0a2ad964a50ee086e447a623b3863c7fbb9ef26a | size: 1,977 | ext: py | lang: Python
max_stars: src/com/python/email/send_mail.py | Leeo1124/pythonDemo @ 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | count: null
max_issues: src/com/python/email/send_mail.py | Leeo1124/pythonDemo @ 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | count: null
max_forks: src/com/python/email/send_mail.py | Leeo1124/pythonDemo @ 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | count: null
content:
'''
Created on 2016-08-10
@author: Administrator
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.multipart import MIMEBase
from email.utils import parseaddr, formataddr
import smtplib
from_addr = '[email protected]'#input('From: ')
password = input('Password: ')
to_addr = '[email protected]'#input('To: ')
smtp_server = 'smtp.163.com'#input('SMTP server: ')
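# The original file defines a _format_addr() helper that is missing from this extract;
# the conventional implementation (an assumption) is added here so the calls below run:
def _format_addr(s):
    name, addr = parseaddr(s)
    return formataddr((Header(name, 'utf-8').encode(), addr))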
# Send a plain-text message:
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# Send an HTML message:
# msg = MIMEText('<html><body><h1>Hello</h1>' +
#     '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
#     '</body></html>', 'html', 'utf-8')
# Send a message with an attachment:
msg = MIMEMultipart()
msg['From'] = _format_addr('Python <%s>' % from_addr)
msg['To'] = _format_addr(' <%s>' % to_addr)
msg['Subject'] = Header('SMTP', 'utf-8').encode()
# The message body is a MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# Add the attachment, here a local image, as a MIMEBase part:
with open('D:/pythonWorkspace/pthonDemo/src/com/python/email/test.jpg', 'rb') as f:
    # Set the MIME type (image/png) and the attachment filename:
    mime = MIMEBase('image', 'png', filename='test.png')
    # Add the headers that describe the attachment:
    mime.add_header('Content-Disposition', 'attachment', filename='test.png')
    mime.add_header('Content-ID', '<0>')
    mime.add_header('X-Attachment-Id', '0')
    # Read in the attachment content:
    mime.set_payload(f.read())
    # Encode it with Base64:
    encoders.encode_base64(mime)
    # Attach it to the MIMEMultipart message:
    msg.attach(mime)
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
avg_line_length: 29.073529 | max_line_length: 83 | alphanum_fraction: 0.676277

hexsha: 0a2b055f1a31031a96cc1310b503a9a15b43be19 | size: 4,176 | ext: py | lang: Python
max_stars: aqui_carattino/blog/migrations/0002_auto_20200424_1452.py | aquilesC/aquicarattino @ b6d873aea6e3ec9d1b802ea13952746e4fcc22b6 | ["MIT"] | count: null
max_issues: aqui_carattino/blog/migrations/0002_auto_20200424_1452.py | aquilesC/aquicarattino @ b6d873aea6e3ec9d1b802ea13952746e4fcc22b6 | ["MIT"] | count: 2 | 2020-05-08T04:30:26.000Z to 2021-06-17T20:19:02.000Z
max_forks: aqui_carattino/blog/migrations/0002_auto_20200424_1452.py | aquilesC/aquicarattino @ b6d873aea6e3ec9d1b802ea13952746e4fcc22b6 | ["MIT"] | count: null
content:
# Generated by Django 3.0.5 on 2020-04-24 12:52
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
avg_line_length: 68.459016 | max_line_length: 1,357 | alphanum_fraction: 0.670498

hexsha: 0a2b4094e1ca26bb245cb9af7bc67b4f16fdf9b2 | size: 2,224 | ext: py | lang: Python
max_stars: studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py | openforcefield/nistdataselection @ d797d597f4ff528a7219d58daa8ef6508d438b24 | ["MIT"] | count: 3 | 2020-03-25T02:42:04.000Z to 2020-07-20T10:39:35.000Z
max_issues: studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py | openforcefield/nistdataselection @ d797d597f4ff528a7219d58daa8ef6508d438b24 | ["MIT"] | count: 13 | 2019-09-05T00:20:03.000Z to 2020-03-05T23:58:04.000Z
max_forks: studies/mixture_feasibility/parsley_benchmark/alcohol_ester/run.py | openforcefield/nistdataselection @ d797d597f4ff528a7219d58daa8ef6508d438b24 | ["MIT"] | count: null
content:
from evaluator import unit
from evaluator.backends import QueueWorkerResources
from evaluator.backends.dask import DaskLSFBackend
from evaluator.client import ConnectionOptions, EvaluatorClient
from evaluator.datasets import PhysicalPropertyDataSet
from evaluator.forcefield import SmirnoffForceFieldSource
from evaluator.server import EvaluatorServer
from evaluator.utils import setup_timestamp_logging
if __name__ == "__main__":
main()
avg_line_length: 31.771429 | max_line_length: 85 | alphanum_fraction: 0.71223

hexsha: 0a2b482bae656ac79eb981d550db6a1224027b57 | size: 2,268 | ext: py | lang: Python
max_stars: nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py | motional/nuplan-devkit @ e39029e788b17f47f2fcadb774098ef8fbdd0d67 | ["Apache-2.0"] | count: 128 | 2021-12-06T15:41:14.000Z to 2022-03-29T13:16:32.000Z
max_issues: nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py | motional/nuplan-devkit @ e39029e788b17f47f2fcadb774098ef8fbdd0d67 | ["Apache-2.0"] | count: 28 | 2021-12-11T08:11:31.000Z to 2022-03-25T02:35:43.000Z
max_forks: nuplan/planning/simulation/observation/idm/test/test_profile_idm_observation.py | motional/nuplan-devkit @ e39029e788b17f47f2fcadb774098ef8fbdd0d67 | ["Apache-2.0"] | count: 14 | 2021-12-11T04:12:26.000Z to 2022-03-24T06:38:30.000Z
content:
import logging
import unittest
from pyinstrument import Profiler
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
unittest.main()
avg_line_length: 36.580645 | max_line_length: 119 | alphanum_fraction: 0.665785

hexsha: 0a2c405aef1ab33457cf8c88423bb2ac392300fb | size: 11,867 | ext: py | lang: Python
max_stars: baselines/ddpg/ddpg.py | RDaneelOlivav/baselines @ fea6ba932055bb76d68b4b22e812bab738fc18f8 | ["MIT"] | count: 11 | 2021-02-23T17:15:21.000Z to 2021-09-08T21:31:57.000Z
max_issues: baselines/ddpg/ddpg.py | RDaneelOlivav/baselines @ fea6ba932055bb76d68b4b22e812bab738fc18f8 | ["MIT"] | count: 1 | 2021-03-04T05:49:46.000Z to 2021-03-04T10:50:59.000Z
max_forks: baselines/ddpg/ddpg.py | RDaneelOlivav/baselines @ fea6ba932055bb76d68b4b22e812bab738fc18f8 | ["MIT"] | count: 2 | 2021-01-29T10:40:35.000Z to 2021-03-03T08:03:59.000Z
content:
import os
import os.path as osp
import time
from collections import deque
import pickle
from baselines.ddpg.ddpg_learner import DDPG
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from baselines.common import set_global_seeds
from baselines import logger
import tensorflow as tf
import numpy as np
try:
from mpi4py import MPI
except ImportError:
MPI = None
avg_line_length: 41.493007 | max_line_length: 188 | alphanum_fraction: 0.610854

hexsha: 0a2e0012f198d1fec400f883216fa2149bcfd26b | size: 1,889 | ext: py | lang: Python
max_stars: footprints/transaction_details.py | enwawerueli/footprints @ d9b2a0064b21495edfd0563cb521b0675ee4363d | ["MIT"] | count: 1 | 2018-10-11T19:23:08.000Z to 2018-10-11T19:23:08.000Z
max_issues: footprints/transaction_details.py | enwawerueli/footprints @ d9b2a0064b21495edfd0563cb521b0675ee4363d | ["MIT"] | count: null
max_forks: footprints/transaction_details.py | enwawerueli/footprints @ d9b2a0064b21495edfd0563cb521b0675ee4363d | ["MIT"] | count: null
content:
import os
from datetime import datetime
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtPrintSupport import QPrinter, QPrintDialog
from jinja2 import TemplateNotFound
from .ui.ui_transaction_details import Ui_TransactionDetails
from .ui import images_rc
from . import jinja_env
from .exceptions import PrinterError
avg_line_length: 38.55102 | max_line_length: 119 | alphanum_fraction: 0.68343

hexsha: 0a2e68851d4d316362a1de570d5c1e2e08a4775e | size: 64,070 | ext: py | lang: Python
max_stars: yt/units/yt_array.py | FeiLi5/git-github.com-yt-project-yt @ 0c6cf75351b91e4da80f6a0207ebbcb73dd72a59 | ["BSD-3-Clause-Clear"] | count: null
max_issues: yt/units/yt_array.py | FeiLi5/git-github.com-yt-project-yt @ 0c6cf75351b91e4da80f6a0207ebbcb73dd72a59 | ["BSD-3-Clause-Clear"] | count: null
max_forks: yt/units/yt_array.py | FeiLi5/git-github.com-yt-project-yt @ 0c6cf75351b91e4da80f6a0207ebbcb73dd72a59 | ["BSD-3-Clause-Clear"] | count: null
content:
"""
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}
# redefine this here to avoid a circular import from yt.funcs
def preserve_units(unit1, unit2=None):
return unit1
def passthrough_unit(unit, unit2=None):
return unit
def return_without_unit(unit, unit2=None):
return None
def arctan2_unit(unit1, unit2):
return NULL_UNIT
def comparison_unit(unit1, unit2=None):
return None
def invert_units(unit):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
inp = inputs[0]
u = getattr(inp, 'units', None)
if u is None:
u = NULL_UNIT
if u.dimensions is angle and ufunc in trigonometric_operators:
inp = inp.in_units('radian').v
if out_arr is not None:
out_arr = ufunc(inp).view(np.ndarray)
return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
inp1 = coerce_iterable_units(inputs[0])
inp2 = coerce_iterable_units(inputs[1])
unit1 = getattr(inp1, 'units', None)
unit2 = getattr(inp2, 'units', None)
ret_class = get_binary_op_return_class(type(inp1), type(inp2))
if unit1 is None:
unit1 = Unit(registry=getattr(unit2, 'registry', None))
if unit2 is None and ufunc is not power:
unit2 = Unit(registry=getattr(unit1, 'registry', None))
elif ufunc is power:
unit2 = inp2
if isinstance(unit2, np.ndarray):
if isinstance(unit2, YTArray):
if unit2.units.is_dimensionless:
pass
else:
raise YTUnitOperationError(ufunc, unit1, unit2)
unit2 = 1.0
return (inp1, inp2), (unit1, unit2), ret_class
def handle_preserve_units(inps, units, ufunc, ret_class):
if units[0] != units[1]:
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
else:
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_multiply_divide_units(unit, units, out, out_arr):
if unit.is_dimensionless and unit.base_value != 1.0:
if not units[0].is_dimensionless:
if units[0].dimensions == units[1].dimensions:
out_arr = np.multiply(out_arr.view(np.ndarray),
unit.base_value, out=out)
unit = Unit(registry=unit.registry)
return out, out_arr, unit
def coerce_iterable_units(input_object):
if isinstance(input_object, np.ndarray):
return input_object
if iterable(input_object):
if any([isinstance(o, YTArray) for o in input_object]):
ff = getattr(input_object[0], 'units', NULL_UNIT, )
if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
raise YTIterableUnitCoercionError(input_object)
# This will create a copy of the data in the iterable.
return YTArray(input_object)
return input_object
else:
return input_object
def sanitize_units_mul(this_object, other_object):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# If the other object is a YTArray and has the same dimensions as the object
# under consideration, convert so we don't mix units with the same
# dimensions.
if isinstance(ret, YTArray):
if inp.units.same_dimensions_as(ret.units):
ret.in_units(inp.units)
return ret
def sanitize_units_add(this_object, other_object, op_string):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# Make sure the other object is a YTArray before we use the `units`
# attribute.
if isinstance(ret, YTArray):
if not inp.units.same_dimensions_as(ret.units):
# handle special case of adding or subtracting with zero or
# array filled with zero
if not np.any(other_object):
return ret.view(np.ndarray)
elif not np.any(this_object):
return ret
raise YTUnitOperationError(op_string, inp.units, ret.units)
ret = ret.in_units(inp.units)
else:
# If the other object is not a YTArray, then one of the arrays must be
# dimensionless or filled with zeros
if not inp.units.is_dimensionless and np.any(ret):
raise YTUnitOperationError(op_string, inp.units, dimensionless)
return ret
def validate_comparison_units(this, other, op_string):
# Check that other is a YTArray.
if hasattr(other, 'units'):
if this.units.expr is other.units.expr:
if this.units.base_value == other.units.base_value:
return other
if not this.units.same_dimensions_as(other.units):
raise YTUnitOperationError(op_string, this.units, other.units)
return other.in_units(this.units)
return other
unary_operators = (
negative, absolute, rint, sign, conj, exp, exp2, log, log2,
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
binary_operators = (
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
left_shift, right_shift, greater, greater_equal, less, less_equal,
not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
trigonometric_operators = (
sin, cos, tan,
)
else: # numpy version equal to or newer than 1.13
def __array_finalize__(self, obj):
if obj is None and hasattr(self, 'units'):
return
self.units = getattr(obj, 'units', NULL_UNIT)
def __pos__(self):
""" Posify the data. """
# this needs to be defined for all numpy versions, see
# numpy issue #9081
return type(self)(super(YTArray, self).__pos__(), self.units)
def __reduce__(self):
"""Pickle reduction method
See the documentation for the standard library pickle module:
http://docs.python.org/2/library/pickle.html
Unit metadata is encoded in the zeroth element of third element of the
returned tuple, itself a tuple used to restore the state of the ndarray.
This is always defined for numpy arrays.
"""
np_ret = super(YTArray, self).__reduce__()
obj_state = np_ret[2]
unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],)
new_ret = np_ret[:2] + unit_state + np_ret[3:]
return new_ret
def __setstate__(self, state):
"""Pickle setstate method
This is called inside pickle.read() and restores the unit data from the
metadata extracted in __reduce__ and then serialized by pickle.
"""
super(YTArray, self).__setstate__(state[1:])
try:
unit, lut = state[0]
except TypeError:
# this case happens when we try to load an old pickle file
# created before we serialized the unit symbol lookup table
# into the pickle file
unit, lut = str(state[0]), default_unit_symbol_lut.copy()
# need to fix up the lut if the pickle was saved prior to PR #1728
# when the pickle format changed
if len(lut['m']) == 2:
lut.update(default_unit_symbol_lut)
for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]:
lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}')
registry = UnitRegistry(lut=lut, add_default_symbols=False)
self.units = Unit(unit, registry=registry)
def __deepcopy__(self, memodict=None):
"""copy.deepcopy implementation
This is necessary for stdlib deepcopy of arrays and quantities.
"""
if memodict is None:
memodict = {}
ret = super(YTArray, self).__deepcopy__(memodict)
return type(self)(ret, copy.deepcopy(self.units))
class YTQuantity(YTArray):
"""
A scalar associated with a unit.
Parameters
----------
input_scalar : an integer or floating point scalar
The scalar to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the quantity. Powers must be specified using python syntax
(cm**3, not cm^3).
registry : A UnitRegistry object
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data.
Examples
--------
>>> from yt import YTQuantity
>>> a = YTQuantity(1, 'cm')
>>> b = YTQuantity(2, 'm')
>>> a + b
201.0 cm
>>> b + a
2.01 m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTQuantity(12, 'g/cm**3')
>>> np.abs(a)
12 g/cm**3
and strip them when it would be annoying to deal with them.
>>> print(np.log10(a))
1.07918124605
YTQuantity is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.quan(5, 'code_length')
>>> a.in_cgs()
1.543e+25 cm
This is equivalent to:
>>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
def validate_numpy_wrapper_units(v, arrs):
if not any(isinstance(a, YTArray) for a in arrs):
return v
if not all(isinstance(a, YTArray) for a in arrs):
raise RuntimeError("Not all of your arrays are YTArrays.")
a1 = arrs[0]
if not all(a.units == a1.units for a in arrs[1:]):
raise RuntimeError("Your arrays must have identical units.")
v.units = a1.units
return v
def uconcatenate(arrs, axis=0):
"""Concatenate a sequence of arrays.
This wrapper around numpy.concatenate preserves units. All input arrays must
have the same units. See the documentation of numpy.concatenate for full
details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uconcatenate((A, B))
YTArray([ 1., 2., 3., 2., 3., 4.]) cm
"""
v = np.concatenate(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Applies the cross product to two YT arrays.
This wrapper around numpy.cross preserves units.
See the documentation of numpy.cross for full
details.
"""
v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
units = arr1.units * arr2.units
arr = YTArray(v, units, registry=registry)
return arr
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uintersect1d(A, B)
YTArray([ 2., 3.]) cm
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def uunion1d(arr1, arr2):
"""Find the union of two arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uunion1d(A, B)
YTArray([ 1., 2., 3., 4.]) cm
"""
v = np.union1d(arr1, arr2)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def unorm(data, ord=None, axis=None, keepdims=False):
"""Matrix or vector norm that preserves units
This is a wrapper around np.linalg.norm that preserves units. See
the documentation for that function for descriptions of the keyword
arguments.
The keepdims argument is ignored if the version of numpy installed is
older than numpy 1.10.0.
"""
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
norm = np.linalg.norm(data, ord=ord, axis=axis)
else:
norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
if norm.shape == ():
return YTQuantity(norm, data.units)
return YTArray(norm, data.units)
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
"""
dot = np.dot(op1.d, op2.d)
units = op1.units*op2.units
if dot.shape == ():
return YTQuantity(dot, units)
return YTArray(dot, units)
def uvstack(arrs):
"""Stack arrays in sequence vertically (row wise) while preserving units
This is a wrapper around np.vstack that preserves units.
"""
v = np.vstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def uhstack(arrs):
"""Stack arrays in sequence horizontally (column wise) while preserving units
This is a wrapper around np.hstack that preserves units.
"""
v = np.hstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ustack(arrs, axis=0):
"""Join a sequence of arrays along a new axis while preserving units
The axis parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the
first dimension and if ``axis=-1`` it will be the last dimension.
This is a wrapper around np.stack that preserves units.
"""
v = np.stack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0],field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == 'particle':
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
def get_binary_op_return_class(cls1, cls2):
if cls1 is cls2:
return cls1
if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)):
return cls2
if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)):
return cls1
if issubclass(cls1, YTQuantity):
return cls2
if issubclass(cls2, YTQuantity):
return cls1
if issubclass(cls1, cls2):
return cls1
if issubclass(cls2, cls1):
return cls2
else:
raise RuntimeError("Undefined operation for a YTArray subclass. "
"Received operand types (%s) and (%s)" % (cls1, cls2))
def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
r"""
Load YTArrays with unit information from a text file. Each row in the
text file must have the same number of values.
Parameters
----------
fname : str
Filename to read.
dtype : data-type, optional
Data-type of the resulting array; default: float.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
Examples
--------
>>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
"""
f = open(fname, 'r')
next_one = False
units = []
num_cols = -1
for line in f.readlines():
words = line.strip().split()
if len(words) == 0:
continue
if line[0] == comments:
if next_one:
units = words[1:]
if len(words) == 2 and words[1] == "Units":
next_one = True
else:
# Here we catch the first line of numbers
try:
col_words = line.strip().split(delimiter)
for word in col_words:
float(word)
num_cols = len(col_words)
break
except ValueError:
mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
f.close()
if len(units) != num_cols:
mylog.warning("Malformed or incomplete units header. Arrays will be "
"dimensionless!")
units = ["dimensionless"]*num_cols
arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
delimiter=delimiter, converters=None,
unpack=True, usecols=usecols, ndmin=0)
if usecols is not None:
units = [units[col] for col in usecols]
mylog.info("Array units: %s" % ", ".join(units))
return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
footer='', comments='#'):
r"""
Write YTArrays with unit information to a text file.
Parameters
----------
fname : str
The file to write the YTArrays to.
arrays : list of YTArrays or single YTArray
The array(s) to write to the file.
fmt : str or sequence of strs, optional
A single format (%10.5f), or a sequence of formats.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file, before the
unit header.
footer : str, optional
String that will be written at the end of the file.
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``yt.loadtxt``.
Examples
--------
>>> sp = ds.sphere("c", (100,"kpc"))
>>> a = sp["density"]
>>> b = sp["temperature"]
>>> c = sp["velocity_x"]
>>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
"""
if not isinstance(arrays, list):
arrays = [arrays]
units = []
for array in arrays:
if hasattr(array, "units"):
units.append(str(array.units))
else:
units.append("dimensionless")
if header != '':
header += '\n'
header += " Units\n " + '\t'.join(units)
np.savetxt(fname, np.transpose(arrays), header=header,
fmt=fmt, delimiter=delimiter, footer=footer,
newline='\n', comments=comments)
avg_line_length: 35.106849 | max_line_length: 119 | alphanum_fraction: 0.592836

hexsha: 0a306266dca5739cfacd9015b52dba19c79b8c41 | size: 1,548 | ext: py | lang: Python
max_stars: src/posts/api/serializers.py | MahmoudMagdi20/django_rest_blog_api @ e1969c75e20b4d807baf26051924a0b99a23b4dc | ["MIT"] | count: null
max_issues: src/posts/api/serializers.py | MahmoudMagdi20/django_rest_blog_api @ e1969c75e20b4d807baf26051924a0b99a23b4dc | ["MIT"] | count: null
max_forks: src/posts/api/serializers.py | MahmoudMagdi20/django_rest_blog_api @ e1969c75e20b4d807baf26051924a0b99a23b4dc | ["MIT"] | count: null
content:
from rest_framework import serializers
from posts.models import Post
post_detail_url = serializers.HyperlinkedIdentityField(
view_name='posts-api:detail',
lookup_field='slug',
)
avg_line_length: 22.434783 | max_line_length: 62 | alphanum_fraction: 0.541344

hexsha: 0a31cb53c607d4ae46c2c3f0ae523a2030f68afc | size: 1,085 | ext: py | lang: Python
max_stars: Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py | 98llm/tir-script-samples @ 0bff8393b79356aa562e9e6512c11ee6e039b177 | ["MIT"] | count: 17 | 2018-09-24T17:27:08.000Z to 2021-09-16T19:09:46.000Z
max_issues: Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py | 98llm/tir-script-samples @ 0bff8393b79356aa562e9e6512c11ee6e039b177 | ["MIT"] | count: 4 | 2018-09-24T17:30:32.000Z to 2022-01-03T11:39:30.000Z
max_forks: Protheus_WebApp/Modules/SIGAGTP/GTPA036ETestCase.py | 98llm/tir-script-samples @ 0bff8393b79356aa562e9e6512c11ee6e039b177 | ["MIT"] | count: 18 | 2019-06-07T17:41:34.000Z to 2022-01-31T18:17:31.000Z
content:
from tir import Webapp
import unittest
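# Hypothetical skeleton (the original test class is not part of this excerpt).
# It follows the usual tir sample pattern: create the Webapp helper once,
# drive the routine under test, and release the browser in tearDownClass.
# All Setup/Program parameters below are placeholders, not real test data.
class GTPA036E(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.oHelper = Webapp()
        cls.oHelper.Setup('SIGAGTP', '01/01/2020', 'T1', 'D MG 01 ')
        cls.oHelper.Program('GTPA036')
    def test_GTPA036E_CT001(self):
        # Placeholder case: real scripts drive the UI through self.oHelper.
        self.oHelper.AssertTrue()
    @classmethod
    def tearDownClass(cls):
        cls.oHelper.TearDown()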
if __name__ == '__main__':
unittest.main()
| 35 | 94 | 0.62765 |
0a31df6f647d431831f887f61ef014c34b1a74e5
| 1,436 |
py
|
Python
|
code_tmpl/views.py
|
lovebirdegg/nnms-server
|
9fd4563ccca9f29add375d346cdd1c2dd636c512
|
[
"MIT"
] | null | null | null |
code_tmpl/views.py
|
lovebirdegg/nnms-server
|
9fd4563ccca9f29add375d346cdd1c2dd636c512
|
[
"MIT"
] | null | null | null |
code_tmpl/views.py
|
lovebirdegg/nnms-server
|
9fd4563ccca9f29add375d346cdd1c2dd636c512
|
[
"MIT"
] | null | null | null |
# @Time : {time}
# @Author : code_generator
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import ListAPIView
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.response import Response
from rest_framework.decorators import api_view,authentication_classes,permission_classes,action
from common.custom import CommonPagination, RbacPermission
from django_filters.rest_framework import DjangoFilterBackend
from django.http import HttpResponse,FileResponse,JsonResponse
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_xops.basic import XopsResponse
from rest_xops.code import *
from django.db.models import Q
from django.apps import apps
from ..models import {model_camel_case_name}
from django.contrib.contenttypes.models import ContentType
from ..serializers.{model_name}_serializers import *
class {model_camel_case_name}View(ModelViewSet):
queryset = {model_camel_case_name}.objects.all()
serializer_class = {model_camel_case_name}Serializer
filter_backends = (DjangoFilterBackend, SearchFilter,OrderingFilter)
pagination_class = CommonPagination
ordering_fields = ('id',)
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
filter_fields = ({filter_fields})
search_fields = ({search_fields})
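# Rendering sketch: this module is a code template, not importable Python.
# A generator script would typically fill the {placeholders} via str.format,
# e.g. with hypothetical values:
#     rendered = open('code_tmpl/views.py').read().format(
#         time='2019-01-01 00:00:00',
#         model_name='device',
#         model_camel_case_name='Device',
#         filter_fields="'name', 'status'",
#         search_fields="'name',",
#     )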
| 39.888889 | 95 | 0.83078 |
0a32bc170cadd36fc1306d343ea0e49f3379160d
| 1,654 |
py
|
Python
|
src/collectors/heartbeat/heartbeat.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 2 |
2016-11-17T13:17:50.000Z
|
2017-03-28T19:42:04.000Z
|
src/collectors/heartbeat/heartbeat.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 62 |
2016-09-30T14:04:52.000Z
|
2021-04-22T21:22:28.000Z
|
src/collectors/heartbeat/heartbeat.py
|
art19/netuitive-diamond
|
57f61f2444e6f3d3692b4ee989415939bfaa932e
|
[
"MIT"
] | 4 |
2017-01-24T14:44:56.000Z
|
2021-03-03T17:14:19.000Z
|
# coding=utf-8
"""
Send a value of 1 as a heartbeat every time this collector is invoked.
#### Dependencies
None
#### Usage
Add the collector config as :
enabled = True
path = netuitive
Metrics are collected as :
- metrics.heartbeat
Netuitive Change History
========================
DVG 2016/11/14 Initial version.
"""
import diamond.collector
from diamond.utils.config import load_config as load_server_config
try:
import netuitive
except ImportError:
netuitive = None
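# Minimal collector sketch (illustrative, not the full netuitive-diamond
# implementation): Diamond collectors subclass diamond.collector.Collector
# and publish metrics from collect().
class HeartbeatCollector(diamond.collector.Collector):
    """Publish a constant 1 so downstream monitoring can detect liveness."""
    def collect(self):
        # Emit "heartbeat" = 1 on every collection interval.
        self.publish('heartbeat', 1)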
| 25.84375 | 90 | 0.615478 |
0a33b4fb181d675d2537be4a920a504933aa3c82
| 6,599 |
py
|
Python
|
process_script/stat.py
|
vitorebatista/AVEMH
|
1c0bea3ae6c35729b80ba49b9663ce83ea43922d
|
[
"MIT"
] | 2 |
2020-11-11T14:02:53.000Z
|
2020-12-10T00:10:50.000Z
|
process_script/stat.py
|
vitorebatista/AVEMH
|
1c0bea3ae6c35729b80ba49b9663ce83ea43922d
|
[
"MIT"
] | null | null | null |
process_script/stat.py
|
vitorebatista/AVEMH
|
1c0bea3ae6c35729b80ba49b9663ce83ea43922d
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import sys
markets = ["hangseng", "dax", "ftse", "sp", "nikkei"]
market = markets[int(sys.argv[1])-1]
# read GD data file
dat = pd.read_csv("./num_res/{}.GD.csv".format(market))
# split into two experiments
exp1_GD = dat[dat.columns[:5]]
exp2_GD = dat[dat.columns[5:]]
# calculate statistics
stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()])
stat1_GD.index = ["Best", "Median", "Std."]
stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()])
stat2_GD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_GD = stat1_GD.loc["Median"].sort_values()
best1_GD = list(meds1_GD.index[:2])
meds2_GD = stat2_GD.loc["Median"].sort_values()
best2_GD = list(meds2_GD.index[:2])
print("{}.GD:".format(market), best1_GD[0], best1_GD[1])
# print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error
# read Spacing data file
dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market))
# split into two experiments
exp1_Spacing = dat[dat.columns[:5]]
exp2_Spacing = dat[dat.columns[5:]]
# calculate statistics
stat1_Spacing = pd.DataFrame(
[exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()])
stat1_Spacing.index = ["Best", "Median", "Std."]
stat2_Spacing = pd.DataFrame(
[exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()])
stat2_Spacing.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Spacing = stat1_Spacing.loc["Median"].sort_values()
best1_Spacing = list(meds1_Spacing.index[:2])
meds2_Spacing = stat2_Spacing.loc["Median"].sort_values()
best2_Spacing = list(meds2_Spacing.index[:2])
print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1])
# print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error
# read MaxSpread data file
dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market))
# split into two experiments
exp1_MaxSpread = dat[dat.columns[:5]]
exp2_MaxSpread = dat[dat.columns[5:]]
# calculate statistics
stat1_MaxSpread = pd.DataFrame(
[exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()])
stat1_MaxSpread.index = ["Best", "Median", "Std."]
stat2_MaxSpread = pd.DataFrame(
[exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()])
stat2_MaxSpread.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False)
best1_MaxSpread = list(meds1_MaxSpread.index[:2])
meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False)
best2_MaxSpread = list(meds2_MaxSpread.index[:2])
print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1])
# print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error
# read Delta data file
dat = pd.read_csv("./num_res/{}.Delta.csv".format(market))
# split into two experiments
exp1_Delta = dat[dat.columns[:5]]
exp2_Delta = dat[dat.columns[5:]]
# calculate statistics
stat1_Delta = pd.DataFrame(
[exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()])
stat1_Delta.index = ["Best", "Median", "Std."]
stat2_Delta = pd.DataFrame(
[exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()])
stat2_Delta.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Delta = stat1_Delta.loc["Median"].sort_values()
best1_Delta = list(meds1_Delta.index[:2])
meds2_Delta = stat2_Delta.loc["Median"].sort_values()
best2_Delta = list(meds2_Delta.index[:2])
print("{}.Delta:".format(market), best1_Delta[0], best1_Delta[1])
# print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error
# read IGD data file
dat = pd.read_csv("./num_res/{}.IGD.csv".format(market))
# split into two experiments
exp1_IGD = dat[dat.columns[:5]]
exp2_IGD = dat[dat.columns[5:]]
# calculate statistics
stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()])
stat1_IGD.index = ["Best", "Median", "Std."]
stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()])
stat2_IGD.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_IGD = stat1_IGD.loc["Median"].sort_values()
best1_IGD = list(meds1_IGD.index[:2])
meds2_IGD = stat2_IGD.loc["Median"].sort_values()
best2_IGD = list(meds2_IGD.index[:2])
print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1])
# print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error
# read Hypervolume data file
dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market))
# split into two experiments
exp1_Hypervolume = dat[dat.columns[:5]]
exp2_Hypervolume = dat[dat.columns[5:]]
# calculate statistics
stat1_Hypervolume = pd.DataFrame(
[exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()])
stat1_Hypervolume.index = ["Best", "Median", "Std."]
stat2_Hypervolume = pd.DataFrame(
[exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()])
stat2_Hypervolume.index = ["Best", "Median", "Std."]
# find best and second best algorithm
meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values(
ascending=False)
best1_Hypervolume = list(meds1_Hypervolume.index[:2])
meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values(
ascending=False)
best2_Hypervolume = list(meds2_Hypervolume.index[:2])
print("{}.Hypervolume:".format(market),
best1_Hypervolume[0], best1_Hypervolume[1])
# print("{}.Hypervolume:".format(market),
# best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error
print("{}\n----------------------------------------------".format(market))
pd.options.display.float_format = '{:.2e}'.format
stat1_overall = pd.concat(
[stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume])
stat2_overall = pd.concat(
[stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume])
arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread",
"Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"],
stat1_overall.index
]
index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""])
stat1_overall.index = index
stat2_overall.index = index
print(stat1_overall)
print("----------------------------------------------")
print(stat2_overall)
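# Refactor sketch (comments only, not executed): the six metric blocks above
# differ only in the file suffix and in whether "best" is the min or the max,
# so they could be collapsed into one loop, e.g.:
#     for metric, best in [("GD", "min"), ("Spacing", "min"), ("MaxSpread", "max"),
#                          ("Delta", "min"), ("IGD", "min"), ("Hypervolume", "max")]:
#         dat = pd.read_csv("./num_res/{}.{}.csv".format(market, metric))
#         exp1, exp2 = dat[dat.columns[:5]], dat[dat.columns[5:]]
#         stat1 = pd.DataFrame([getattr(exp1, best)(), exp1.median(), exp1.std()])
#         ...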
| 39.279762 | 105 | 0.690711 |
0a33cb634cfe076d601a3145a01487981499f068
| 22,712 |
py
|
Python
|
Scripts/calc_Utilities.py
|
zmlabe/ThicknessSensitivity
|
6defdd897a61d7d1a02f34a9f4ec92b2b17b3075
|
[
"MIT"
] | 1 |
2017-10-22T02:22:14.000Z
|
2017-10-22T02:22:14.000Z
|
Scripts/calc_Utilities.py
|
zmlabe/ThicknessSensitivity
|
6defdd897a61d7d1a02f34a9f4ec92b2b17b3075
|
[
"MIT"
] | null | null | null |
Scripts/calc_Utilities.py
|
zmlabe/ThicknessSensitivity
|
6defdd897a61d7d1a02f34a9f4ec92b2b17b3075
|
[
"MIT"
] | 4 |
2018-04-05T17:55:36.000Z
|
2022-03-31T07:05:01.000Z
|
"""
Functions are useful utilities for SITperturb experiments
Notes
-----
Author : Zachary Labe
Date : 13 August 2017
Usage
-----
[1] calcDecJan(varx,vary,lat,lon,level,levsq)
[2] calcDecJanFeb(varx,vary,lat,lon,level,levsq)
[3] calc_indttest(varx,vary)
[4] calc_weightedAve(var,lats)
[5] calc_spatialCorr(varx,vary,lats,lons,weight)
[6] calc_RMSE(varx,vary,lats,lons,weight)
[7] calc_spatialCorrHeight(varx,vary,lats,lons,weight)
[8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq)
"""
def calcDecJan(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_dj : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_dj : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_dj = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djappendf = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
        raise ValueError('Selected wrong height - (surface or profile)!')
print('Completed: Organized data by months (ON,DJ,FM)!')
print('*Completed: Finished calcDecJan function!')
return varx_dj,vary_dj
###############################################################################
###############################################################################
###############################################################################
def calcDecJanFeb(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January-February
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_djf : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_djf : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq)
"""
    print('\n>>> Using calcDecJanFeb function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
djfappendh = np.append(djfappendh1,varxravel[13+i,:,:])
djfappendf = np.append(djfappendf1,varyravel[13+i,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_djf = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
djfappendh = np.append(djfappendh1,
varxravel[13+i,:,:,:])
djfappendf = np.append(djfappendf1,
varyravel[13+i,:,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
        raise ValueError('Selected wrong height - (surface or profile)!')
print('Completed: Organized data by months (DJF)!')
print('*Completed: Finished calcDecJanFeb function!')
return varx_djf,vary_djf
###############################################################################
###############################################################################
###############################################################################
def calc_indttest(varx,vary):
"""
Function calculates statistical difference for 2 independent
sample t-test
Parameters
----------
varx : 3d array
vary : 3d array
Returns
-------
stat = calculated t-statistic
pvalue = two-tailed p-value
Usage
-----
stat,pvalue = calc_ttest(varx,vary)
"""
print('\n>>> Using calc_ttest function!')
### Import modules
import numpy as np
import scipy.stats as sts
### 2-independent sample t-test
stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')
### Significant at 95% confidence level
pvalue[np.where(pvalue >= 0.05)] = np.nan
pvalue[np.where(pvalue < 0.05)] = 1.
print('*Completed: Finished calc_ttest function!')
return stat,pvalue
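# Usage sketch (hypothetical shapes): varx and vary are e.g. [year,lat,lon]
# samples; the returned pvalue grid is 1 where p < 0.05 and NaN elsewhere.
#     stat, pvalue = calc_indttest(np.random.rand(30, 96, 144),
#                                  np.random.rand(30, 96, 144))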
###############################################################################
###############################################################################
###############################################################################
def calc_weightedAve(var,lats):
"""
Area weights sit array 5d [ens,year,month,lat,lon] into [ens,year,month]
Parameters
----------
var : 5d,4d,3d array of a gridded variable
lats : 2d array of latitudes
Returns
-------
meanvar : weighted average for 3d,2d,1d array
Usage
-----
meanvar = calc_weightedAve(var,lats)
"""
print('\n>>> Using calc_weightedAve function!')
### Import modules
import numpy as np
### Calculate weighted average for various dimensional arrays
if var.ndim == 5:
meanvar = np.empty((var.shape[0],var.shape[1],var.shape[2]))
for ens in range(var.shape[0]):
for i in range(var.shape[1]):
for j in range(var.shape[2]):
varq = var[ens,i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[ens,i,j] = np.nansum(varmask*areamask) \
/np.sum(areamask)
elif var.ndim == 4:
meanvar = np.empty((var.shape[0],var.shape[1]))
for i in range(var.shape[0]):
for j in range(var.shape[1]):
varq = var[i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 3:
meanvar = np.empty((var.shape[0]))
for i in range(var.shape[0]):
varq = var[i,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i] = np.nansum(varmask*areamask)/np.sum(areamask)
    elif var.ndim == 2:
varq = var[:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar = np.nansum(varmask*areamask)/np.sum(areamask)
else:
        raise ValueError('Variable has the wrong dimensions!')
print('Completed: Weighted variable average!')
print('*Completed: Finished calc_weightedAve function!')
return meanvar
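# Usage sketch (hypothetical shapes; lats must be the 2d latitude grid used
# for the cosine weights above):
#     import numpy as np
#     lat2d = np.repeat(np.linspace(-90., 90., 96)[:, np.newaxis], 144, axis=1)
#     var = np.random.rand(10, 96, 144)        # [year, lat, lon]
#     ave = calc_weightedAve(var, lat2d)       # -> shape (10,)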
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorr(varx,vary,lats,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient
Parameters
----------
varx : 2d array
vary : 2d array
lats : 1d array
lons : 1d array of latitude
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorr(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorr function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorr function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_RMSE(varx,vary,lats,lons,weight):
"""
Calculates root mean square weighted average
Parameters
----------
varx : 2d array
vary : 2d array
lons : 1d array of latitude
weight : string (yes or no)
Returns
-------
rmse : 1d array
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
import numpy as np
from sklearn.metrics import mean_squared_error
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
elif weight == 'no':
### Root mean square error from sklearn (not weighted)
rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel()))
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeight(varx,vary,levs,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels)
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
lons : 1d array of latitude
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorrHeight function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeight function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeightLev(varx,vary,levs,lons,weight,levelq):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels). Change the
weighting for different level correlations
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
lons : 1d array of latitude
weight : string (yes or no)
levelq : string (all, tropo, strato)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels)
"""
print('\n>>> Using calc_spatialCorrHeightLev function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
if levelq == 'all':
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'tropo':
gwq = np.array([1.0,1.0,1.0,1.0,0.5,0.5,0.5,0.2,0.2,0.,0.,0.,
0.,0.,0.,0.,0.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'strato':
gwq = np.array([0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.5,1.,1.,1.,1.
,1.,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeightLev function!')
return corrcoef
| 36.514469 | 95 | 0.468739 |
0a33db09aef1c74c5ffed0995a5bf7a3bfec7f84
| 13,403 |
py
|
Python
|
tests/python/unittest/test_tir_schedule_compute_inline.py
|
xiebaiyuan/tvm
|
726239d788e3b90cbe4818271ca5361c46d8d246
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
tests/python/unittest/test_tir_schedule_compute_inline.py
|
xiebaiyuan/tvm
|
726239d788e3b90cbe4818271ca5361c46d8d246
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
tests/python/unittest/test_tir_schedule_compute_inline.py
|
xiebaiyuan/tvm
|
726239d788e3b90cbe4818271ca5361c46d8d246
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
from tvm import tir
from tvm.script import ty
# pylint: disable=no-member,invalid-name,unused-variable
# pylint: enable=no-member,invalid-name,unused-variable
def test_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
def test_compute_inline_multi_consumer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
block_d = sch.get_block("D")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
assert sch.get(block_d).name_hint == "D"
def test_compute_inline_fail_multi_writer():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True, error_render_level="detail")
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_reverse_compute_inline_elementwise():
sch = tir.Schedule(elementwise, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_under_loop():
sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
block_b = sch.get_block("B")
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
def test_reverse_compute_inline_fail_as_dce():
sch = tir.Schedule(elementwise_standalone, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_b)
def test_reverse_compute_inline_fail_multi_producer():
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
block_d = sch.get_block("D")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_d)
def test_reverse_compute_inline_fail_multi_reader():
sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mode=True)
block_c = sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])
def test_reverse_compute_fail_multi_reverse_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_c = sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_opaque_access_load():
sch = tir.Schedule(opaque_access_load, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_opaque_access_store():
sch = tir.Schedule(opaque_access_store, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_buffer_matched():
sch = tir.Schedule(buffer_matched, debug_mode=True)
block_b = sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_compute_inline_predicate():
sch = tir.Schedule(elementwise_predicate, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])
def test_compute_inline_multi_loads():
sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
block_b = sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])
if __name__ == "__main__":
test_compute_inline_elementwise()
test_compute_inline_under_loop()
test_compute_inline_as_dce()
test_compute_inline_multi_consumer()
test_compute_inline_fail_multi_writer()
test_reverse_compute_inline_elementwise()
test_reverse_compute_inline_under_loop()
test_reverse_compute_inline_fail_as_dce()
test_reverse_compute_inline_fail_multi_producer()
test_reverse_compute_inline_fail_multi_reader()
test_reverse_compute_multi_reverse_loads()
test_reverse_compute_fail_multi_reverse_loads()
test_opaque_access_load()
test_opaque_access_store()
test_buffer_matched()
test_compute_inline_predicate()
test_compute_inline_multi_loads()
| 35.836898 | 94 | 0.643886 |
0a3465198ac8a54def9b1ff02f89cdbec3079889
| 4,239 |
py
|
Python
|
cwl_flask.py
|
Sage-Bionetworks/workflow-service
|
8b5dc0afe9ea0972014cdf48a693ee6f893cfe5e
|
[
"Apache-2.0"
] | 1 |
2019-11-14T23:46:23.000Z
|
2019-11-14T23:46:23.000Z
|
cwl_flask.py
|
Sage-Bionetworks/workflow-service
|
8b5dc0afe9ea0972014cdf48a693ee6f893cfe5e
|
[
"Apache-2.0"
] | null | null | null |
cwl_flask.py
|
Sage-Bionetworks/workflow-service
|
8b5dc0afe9ea0972014cdf48a693ee6f893cfe5e
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, Response, request, redirect
import subprocess
import tempfile
import json
import yaml
import signal
import threading
import time
import copy
app = Flask(__name__)
jobs_lock = threading.Lock()
jobs = []
def logspooler(job):
with open(job.logname, "r") as f:
while True:
r = f.read(4096)
if r:
yield r
else:
with job.updatelock:
if job.status["state"] != "Running":
break
time.sleep(1)
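# Streaming sketch (hypothetical wiring; the Job class and the HTTP routes are
# not part of this excerpt): logspooler is meant to be wrapped in a Flask
# Response so clients receive the log as it grows, e.g.
#     return Response(logspooler(job), mimetype="text/plain")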
if __name__ == "__main__":
# app.debug = True
app.run()
| 28.641892 | 79 | 0.517103 |
0a3472688b742e51fb849821bffb5408a0c299f0
| 5,306 |
py
|
Python
|
cs15211/ReverseBits.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1 |
2021-07-05T01:53:30.000Z
|
2021-07-05T01:53:30.000Z
|
cs15211/ReverseBits.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | null | null | null |
cs15211/ReverseBits.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1 |
2018-01-08T07:14:08.000Z
|
2018-01-08T07:14:08.000Z
|
__source__ = 'https://leetcode.com/problems/reverse-bits/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-bits.py
# Time : O(n)
# Space: O(1)
# Bit Manipulation
#
# Description: Leetcode # 190. Reverse Bits
#
# Reverse bits of a given 32 bits unsigned integer.
#
# For example, given input 43261596 (represented in binary as 00000010100101000001111010011100),
# return 964176192 (represented in binary as 00111001011110000010100101000000).
#
# Follow up:
# If this function is called many times, how would you optimize it?
#
# Companies
# Apple Airbnb
# Related Topics
# Bit Manipulation
# Similar Questions
# Number of 1 Bits
#
import unittest
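# A plain Python sketch of the same bit-by-bit reversal discussed in the notes
# below (added for illustration; not part of the original solution text):
class Solution(object):
    def reverseBits(self, n):
        """
        :type n: int
        :rtype: int
        """
        result = 0
        for _ in range(32):
            result = (result << 1) | (n & 1)  # append the lowest bit of n
            n >>= 1                           # drop the bit just copied
        return result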
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
int ret = 0;
for (int i = 0; i < 32; i++) {
if ((n & 1) != 0) {
ret |= 1; //same as // res += n & 1
}
n >>>= 1; // padding 0 on the left side
if (i < 31) { // CATCH: for last digit, don't shift!
ret <<= 1;
}
}
return ret;
}
}
We first initialize result to 0. We then iterate from 0 to 31 (an integer has 32 bits).
In each iteration: We first shift result to the left by 1 bit.
Then, if the last digit of input n is 1, we add 1 to result.
To find the last digit of n, we just do: (n & 1)
For example, if n = 5 (101), n&1 = 101 & 001 = 001 = 1; however, if n = 2 (10), n&1 = 10 & 01 = 0.
Finally, we update n by shifting it to the right by 1 (n >>= 1)
At the end of the iteration, we return result.
Example, if input n = 13 (represented in binary as
0000_0000_0000_0000_0000_0000_0000_1101, the "_" is for readability),
calling reverseBits(13) should return:
1011_0000_0000_0000_0000_0000_0000_0000
Here is how our algorithm would work for input n = 13:
Initially, result = 0 = 0000_0000_0000_0000_0000_0000_0000_0000,
n = 13 = 0000_0000_0000_0000_0000_0000_0000_1101
Starting for loop:
i = 0:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0000.
n&1 = 0000_0000_0000_0000_0000_0000_0000_1101 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0000 +
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0110.
We then go to the next iteration.
i = 1:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0010;
n&1 = 0000_0000_0000_0000_0000_0000_0000_0110 &
0000_0000_0000_0000_0000_0000_0000_0001
= 0000_0000_0000_0000_0000_0000_0000_0000 = 0;
therefore we don't increment result.
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0011.
We then go to the next iteration.
i = 2:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0100.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0011 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0100 +
0000_0000_0000_0000_0000_0000_0000_0001 =
result = 0000_0000_0000_0000_0000_0000_0000_0101
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0001.
We then go to the next iteration.
i = 3:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_1010.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0001 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
= 0000_0000_0000_0000_0000_0000_0000_1011
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0000 = 0.
Now, from here to the end of the iteration, n is 0,
so (n&1) will always be 0 and n >>=1 will not change n.
The only change will be for result <<=1, i.e. shifting result to the left by 1 digit.
Since we have the iterations i = 4 to i = 31 left, this will result
in padding 28 0's to the right of result, i.e. at the end
we get result = 1011_0000_0000_0000_0000_0000_0000_0000
This is exactly what we expected to get
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
if (n == 0) return 0;
int result = 0;
for (int i = 0; i < 32; i++) {
result <<= 1;
if ((n & 1) == 1) result++;
n >>= 1;
}
return result;
}
}
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
n = ((n & 0x55555555) << 1) | ((n & 0xAAAAAAAA) >>> 1);
n = ((n & 0x33333333) << 2) | ((n & 0xCCCCCCCC) >>> 2);
n = ((n & 0x0F0F0F0F) << 4) | ((n & 0xF0F0F0F0) >>> 4);
n = ((n & 0x00FF00FF) << 8) | ((n & 0xFF00FF00) >>> 8);
return (n >>> 16) | (n << 16);
}
}
'''
| 31.963855 | 96 | 0.67942 |
0a34f204369f984c96ff6e5a370ca5dd3ef2cd5a
| 128 |
py
|
Python
|
tibanna/_version.py
|
freezecoder/tibanna
|
89b02e95d04b9c630a9786f4d1eb8157c40098e0
|
[
"MIT"
] | null | null | null |
tibanna/_version.py
|
freezecoder/tibanna
|
89b02e95d04b9c630a9786f4d1eb8157c40098e0
|
[
"MIT"
] | null | null | null |
tibanna/_version.py
|
freezecoder/tibanna
|
89b02e95d04b9c630a9786f4d1eb8157c40098e0
|
[
"MIT"
] | null | null | null |
"""Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.16.1"
| 25.6 | 76 | 0.710938 |
0a35e9528722fb698d7d9b2d769ceed182b29b73
| 1,265 |
py
|
Python
|
selective_merge_pdf.py
|
vs-slavchev/selective_merge_pdf
|
b24b4dbcaf1ffb8dc0924dafec56f94e452c1ebd
|
[
"MIT"
] | null | null | null |
selective_merge_pdf.py
|
vs-slavchev/selective_merge_pdf
|
b24b4dbcaf1ffb8dc0924dafec56f94e452c1ebd
|
[
"MIT"
] | null | null | null |
selective_merge_pdf.py
|
vs-slavchev/selective_merge_pdf
|
b24b4dbcaf1ffb8dc0924dafec56f94e452c1ebd
|
[
"MIT"
] | null | null | null |
from sys import argv
from PyPDF2 import PdfFileReader, PdfFileWriter
import re
range_pattern = re.compile(r'(\d+)(\.\.|-)(\d+)')
comma_pattern = re.compile(r'\d+(,\d+)*')
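def pages_args_to_array(pages_arg):
    # Hypothetical helper (not part of this excerpt): expand a "1-3"/"1..3"
    # range or a "3,4,10" list into the page numbers used in __main__ below.
    range_match = range_pattern.match(pages_arg)
    if range_match:
        start, end = int(range_match.group(1)), int(range_match.group(3))
        return list(range(start, end + 1))
    if comma_pattern.match(pages_arg):
        return [int(page) for page in pages_arg.split(',')]
    raise ValueError("invalid page specification: %s" % pages_arg)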
if __name__ == '__main__':
    assert(len(argv) > 1), "usage example:\npython3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50"
assert(len(argv) % 2 == 1), "invalid arguments; supply page numbers after each pdf name"
files_names = argv[1::2]
pages_args = argv[2::2]
pdf_writer = PdfFileWriter()
for file_name, pages in zip(files_names, pages_args):
pdf_reader = PdfFileReader(file_name)
last_page_index = pdf_reader.getNumPages()
pages = pages_args_to_array(pages)
pages_to_add = list(filter(lambda i: i >= 0 and i <= last_page_index, pages))
for page in pages_to_add:
pdf_writer.addPage(pdf_reader.getPage(page - 1))
with open("merged.pdf", 'wb') as out:
pdf_writer.write(out)
| 31.625 | 115 | 0.709091 |
0a366dc7ea5c7f093418a07f29237983fc6bf2d7
| 4,031 |
py
|
Python
|
vp/scoring.py
|
romack77/vp-toolbox
|
2677b78b80d0b4794735f3ee9bd70403c6b884e6
|
[
"MIT"
] | 10 |
2019-08-03T06:29:47.000Z
|
2022-02-05T03:08:15.000Z
|
vp/scoring.py
|
romack77/vp-toolbox
|
2677b78b80d0b4794735f3ee9bd70403c6b884e6
|
[
"MIT"
] | null | null | null |
vp/scoring.py
|
romack77/vp-toolbox
|
2677b78b80d0b4794735f3ee9bd70403c6b884e6
|
[
"MIT"
] | 3 |
2019-01-22T12:19:05.000Z
|
2021-02-25T16:58:59.000Z
|
import math
from vp import geom_tools
def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
"""Calculates error in a detected horizon.
This measures the max distance between the detected horizon line and
the ground truth horizon line, within the image's x-axis, and
normalized by image height.
Args:
ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
Float, or None if a horizon is missing altogether.
"""
    if ground_truth_horizon is None or detected_horizon is None:
        return None
    width, height = image_dims
    # Evaluate each (slope, intercept) horizon line at a given x coordinate.
    gt = lambda x: ground_truth_horizon[0] * x + ground_truth_horizon[1]
    dt = lambda x: detected_horizon[0] * x + detected_horizon[1]
    return max(abs(gt(0) - dt(0)), abs(gt(width) - dt(width))) / height
def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
"""Measures error in direction from center of detected vanishing points.
Each detected VP is matched with its closest unclaimed ground truth VP.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
image_dims: Tuple of integers, (width, height) of the image, in pixels.
Returns:
List with float degrees of error for each ground truth VP.
Error is None for missing VPs.
"""
principal_point = (image_dims[0] // 2, image_dims[1] // 2)
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
gt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
dt_angle = geom_tools.get_line_angle((
principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
point_pair_dists.append((angle_diff, gt_vp, dt_vp))
point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
gt_vp_to_error = {}
seen_dt_vps = set()
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
continue
gt_vp_to_error[gt_vp] = distance
seen_dt_vps.add(dt_vp)
return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]
def location_accuracy_error(ground_truth_vps, detected_vps):
"""Measures average error in the location of detected vanishing points.
"Missed" or "extra" VPs do not count against the score.
Based on log distance of detected vp from ground truth vp.
Args:
ground_truth_vps: List of ground truth VP point tuples.
detected_vps: List of detected VP point tuples.
Returns:
Float, error.
"""
if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
return 0
point_pair_dists = []
for gt_vp in ground_truth_vps:
for dt_vp in detected_vps:
distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
point_pair_dists.append((distance, gt_vp, dt_vp))
    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])
seen_gt_vps = set()
seen_dt_vps = set()
total_error = 0
for distance, gt_vp, dt_vp in point_pair_dists:
if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
continue
seen_gt_vps.add(gt_vp)
seen_dt_vps.add(dt_vp)
if distance > 0:
total_error += math.log(distance)
return total_error / min(len(detected_vps), len(ground_truth_vps))
def num_model_detection_error(ground_truth_vps, detected_vps):
"""Measures error in the number of detected vanishing points.
Returns:
Integer, positive when there are too many VPs, negative
when there are too few.
"""
return len(detected_vps) - len(ground_truth_vps)
| 34.161017 | 86 | 0.675019 |
0a36be6ff9e65c7b1ffad1c7ff8f47b4ee0f6df3
| 4,175 |
py
|
Python
|
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py
|
collassubmission91/CompoSuite-Code
|
ac544efb68a11ed8a483b0932975c4949f0cec90
|
[
"MIT"
] | null | null | null |
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py
|
collassubmission91/CompoSuite-Code
|
ac544efb68a11ed8a483b0932975c4949f0cec90
|
[
"MIT"
] | null | null | null |
compositional-rl-benchmark/composition/spinningup_training/train_mtl_ppo.py
|
collassubmission91/CompoSuite-Code
|
ac544efb68a11ed8a483b0932975c4949f0cec90
|
[
"MIT"
] | null | null | null |
import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
if __name__ == '__main__':
main()
| 43.489583 | 199 | 0.697725 |
0a3711b515419fb6ad721023cf62fe24b0ba8280
| 15,121 |
py
|
Python
|
igvm/cli.py
|
innogames/igvm
|
6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e
|
[
"MIT"
] | 14 |
2018-02-15T14:09:54.000Z
|
2021-07-19T01:55:58.000Z
|
igvm/cli.py
|
innogames/igvm
|
6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e
|
[
"MIT"
] | 129 |
2018-02-19T09:47:18.000Z
|
2022-03-02T14:08:10.000Z
|
igvm/cli.py
|
innogames/igvm
|
6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e
|
[
"MIT"
] | 10 |
2018-02-16T15:56:59.000Z
|
2021-05-14T23:31:31.000Z
|
"""igvm - The command line interface
Copyright (c) 2017 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser, _SubParsersAction
from logging import StreamHandler, root as root_logger
import time
from fabric.network import disconnect_all
from igvm.commands import (
change_address,
disk_set,
evacuate,
host_info,
mem_set,
vcpu_set,
vm_build,
vm_delete,
vm_migrate,
vm_rename,
vm_restart,
vm_start,
vm_stop,
vm_sync, vm_define,
)
from igvm.libvirt import close_virtconns
def parse_args():
top_parser = IGVMArgumentParser('igvm')
top_parser.add_argument('--silent', '-s', action='count', default=0)
top_parser.add_argument('--verbose', '-v', action='count', default=0)
subparsers = top_parser.add_subparsers(help='Actions')
subparser = subparsers.add_parser(
'build',
description=vm_build.__doc__,
)
subparser.set_defaults(func=vm_build)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--postboot',
metavar='postboot_script',
help='Run postboot_script on the guest after first boot',
)
subparser.add_argument(
'--skip-puppet',
action='store_false',
dest='run_puppet',
help='Skip running puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow building on a Host which has the state online_reserved',
)
subparser.add_argument(
'--rebuild',
dest='rebuild',
action='store_true',
help='Rebuild already defined VM or build it if not defined',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'migrate',
description=vm_migrate.__doc__,
)
subparser.set_defaults(func=vm_migrate)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'hypervisor_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--run-puppet',
action='store_true',
help='Run puppet in chroot before powering up',
)
subparser.add_argument(
'--debug-puppet',
action='store_true',
help='Run puppet in debug mode',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Force offline migration',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
choices=('drbd', 'netcat', 'xfs'),
help=(
'Specify drbd (default), netcat or xfs transport to migrate '
'disk image'
),
)
subparser.add_argument(
'--no-shutdown',
action='store_true',
help=(
'Don\'t shutdown VM during offline migration, igvm will wait for'
' operator to shut down VM for 24h.'
),
)
subparser.add_argument(
'--enforce-vm-env',
dest='enforce_vm_env',
action='store_true',
help='Build or migrate VM only to a HV with the same environment of VM'
)
subparser.add_argument(
'--disk-size',
dest='disk_size',
type=int,
help='Resize disk of migrated VM. Expects new size in GiB. '
'Works only with --offline --offline-transport=xfs',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'change-address',
description=disk_set.__doc__,
)
subparser.set_defaults(func=change_address)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_address',
help=(
'New IPv4 address of VM'
)
)
subparser.add_argument(
'--offline',
action='store_true',
help='Perform IP address change offline',
)
subparser.add_argument(
'--migrate',
action='store_true',
help='Migrate VM to new HV while changing IP address',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migration to a Host which has the state online_reserved',
)
subparser.add_argument(
'--offline-transport',
default='drbd',
help=(
'Specify drbd (default) or netcat transport to migrate disk image'
),
)
subparser = subparsers.add_parser(
'disk-set',
description=disk_set.__doc__,
)
subparser.set_defaults(func=disk_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New disk size with an optional unit (default GiB). '
'Can be specified relative with "+". Only integers are allowed'
)
)
subparser = subparsers.add_parser(
'mem-set',
description=mem_set.__doc__,
)
subparser.set_defaults(func=mem_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'size',
help=(
'New memory size with optional unit (default is MiB).'
'Only integers are allowed.'
),
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change memory, and restart VM',
)
subparser = subparsers.add_parser(
'vcpu-set',
description=vcpu_set.__doc__,
)
subparser.set_defaults(func=vcpu_set)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'count',
type=int,
help='New number of CPUs',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, change CPUs, and restart VM',
)
subparser = subparsers.add_parser(
'start',
description=vm_start.__doc__,
)
subparser.set_defaults(func=vm_start)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--unretire',
nargs='?',
const='maintenance',
help='Unretire a VM, set it to given state, maintenance by default',
)
subparser = subparsers.add_parser(
'stop',
description=vm_stop.__doc__,
)
subparser.set_defaults(func=vm_stop)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Retire VM after stopping it',
)
subparser = subparsers.add_parser(
'restart',
description=vm_restart.__doc__,
)
subparser.set_defaults(func=vm_restart)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--force',
action='store_true',
help='Do not wait for guest to shutdown gracefully',
)
subparser.add_argument(
'--no-redefine',
action='store_true',
help='Do not redefine the domain to use latest hypervisor settings',
)
subparser = subparsers.add_parser(
'delete',
description=vm_delete.__doc__,
)
subparser.set_defaults(func=vm_delete)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'--retire',
action='store_true',
help='Set VM state to "retired" on Serveradmin instead of deleting',
)
subparser = subparsers.add_parser(
'info',
description=host_info.__doc__,
)
subparser.set_defaults(func=host_info)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'sync',
description=vm_sync.__doc__,
)
subparser.set_defaults(func=vm_sync)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser = subparsers.add_parser(
'rename',
description=vm_rename.__doc__,
)
subparser.set_defaults(func=vm_rename)
subparser.add_argument(
'vm_hostname',
help='Hostname of the guest system',
)
subparser.add_argument(
'new_hostname',
help='New hostname',
)
subparser.add_argument(
'--offline',
action='store_true',
help='Shutdown VM, if running',
)
subparser = subparsers.add_parser(
'evacuate',
description=evacuate.__doc__,
)
subparser.set_defaults(func=evacuate)
subparser.add_argument(
'hv_hostname',
help='Hostname of the hypervisor',
)
subparser.add_argument(
'dst_hv_hostname',
nargs='?',
default=None,
help='Hostname of destination hypervisor',
)
subparser.add_argument(
'--dry-run',
action='store_true',
help='Do not migrate but just print what would be done'
)
subparser.add_argument(
'--offline',
nargs='*',
help='Migrate VMs matching the given serveradmin function offline',
)
subparser.add_argument(
'--ignore-reserved',
dest='allow_reserved_hv',
action='store_true',
help='Allow migrating to a host which has the state online_reserved',
)
subparser.add_argument(
'--soft-preferences',
dest='soft_preferences',
action='store_true',
help='Overrules all preferences so that Hypervisors are not excluded. '
'Use this if igvm fails to find a matching Hypervisor, but you '
'are in urgent need to do it anyway. Hint: If igvm fails to find '
'a matching Hypervisor something might be really wrong. Run igvm '
'with --verbose to check why it fails finding a Hypervisor.',
)
subparser = subparsers.add_parser(
'define',
description=vm_define.__doc__,
)
subparser.set_defaults(func=vm_define)
subparser.add_argument('vm_hostname', help='Hostname of the guest system')
return vars(top_parser.parse_args())
def main():
args = parse_args()
configure_root_logger(args.pop('silent'), args.pop('verbose'))
try:
args.pop('func')(**args)
finally:
# Fabric requires the disconnect function to be called after every
# use. We are also taking our chance to disconnect from
# the hypervisors.
disconnect_all()
close_virtconns()
# The underlying library of Fabric, Paramiko, raises an error, on
# destruction right after the disconnect function is called. We are
# sleeping for a little while to avoid this.
time.sleep(0.1)
def configure_root_logger(silent, verbose):
root_logger.addHandler(IGVMLogHandler())
# We are summing up the silent and verbose arguments in here. It
# is not really meaningful to use them both, but giving an error is not
# better. See Python logging library documentation [1] for the levels.
# Paramiko is overly verbose. We configure it for one level higher.
#
# [1] https://docs.python.org/library/logging.html#logging-levels
level = 20 + (silent - verbose) * 10
root_logger.setLevel(level)
root_logger.getChild('paramiko').setLevel(level + 10)
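# Worked example of the arithmetic above (illustration only, not part of the
# original module):
#
#   no flags  -> level 20 (INFO),    paramiko at 30 (WARNING)
#   -v        -> level 10 (DEBUG),   paramiko at 20 (INFO)
#   -s        -> level 30 (WARNING), paramiko at 40 (ERROR)
#   -v -v -s  -> level 10 (DEBUG),   paramiko at 20 (INFO)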
| 29.824458 | 79 | 0.612195 |
0a390498151447698302dd1d056f6ca3842fd3c6
| 987 |
py
|
Python
|
test/test_data_processor/test_condition_generation_dataset.py
|
puraminy/OpenPrompt
|
49f0ed9719bb6285e94c746de4511991c848492c
|
[
"Apache-2.0"
] | 979 |
2021-09-30T15:32:58.000Z
|
2022-03-31T11:23:03.000Z
|
test/test_data_processor/test_condition_generation_dataset.py
|
Spritebai/OpenPrompt
|
bd9ea544ab144d94af32d245101ba35c9d5a5a65
|
[
"Apache-2.0"
] | 104 |
2021-10-01T07:56:33.000Z
|
2022-03-31T14:39:09.000Z
|
test/test_data_processor/test_condition_generation_dataset.py
|
Spritebai/OpenPrompt
|
bd9ea544ab144d94af32d245101ba35c9d5a5a65
|
[
"Apache-2.0"
] | 121 |
2021-09-30T16:09:53.000Z
|
2022-03-31T09:39:34.000Z
|
import os, sys
from os.path import dirname as d
from os.path import abspath, join
root_dir = d(d(d(abspath(__file__))))
sys.path.append(root_dir)
from openprompt.data_utils.conditional_generation_dataset import PROCESSORS
base_path = os.path.join(root_dir, "datasets/CondGen")
| 41.125 | 98 | 0.761905 |
0a393fec60ca724f475a9fdf13a20c1df07768c4
| 5,354 |
py
|
Python
|
BaseTools/Source/Python/Common/BuildToolError.py
|
JayLeeCompal/EDKII_Git
|
de4800d50e1f357002bf77235d3bebabd0c00007
|
[
"MIT"
] | 1 |
2022-01-20T04:51:29.000Z
|
2022-01-20T04:51:29.000Z
|
BaseTools/Source/Python/Common/BuildToolError.py
|
JayLeeCompal/EDKII_Git
|
de4800d50e1f357002bf77235d3bebabd0c00007
|
[
"MIT"
] | 1 |
2022-01-21T06:19:02.000Z
|
2022-01-21T06:19:02.000Z
|
BaseTools/Source/Python/Common/BuildToolError.py
|
JayLeeCompal/EDKII_Git
|
de4800d50e1f357002bf77235d3bebabd0c00007
|
[
"MIT"
] | null | null | null |
## @file
# Standardized Error Handling infrastructures.
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
FILE_OPEN_FAILURE = 1
FILE_WRITE_FAILURE = 2
FILE_PARSE_FAILURE = 3
FILE_READ_FAILURE = 4
FILE_CREATE_FAILURE = 5
FILE_CHECKSUM_FAILURE = 6
FILE_COMPRESS_FAILURE = 7
FILE_DECOMPRESS_FAILURE = 8
FILE_MOVE_FAILURE = 9
FILE_DELETE_FAILURE = 10
FILE_COPY_FAILURE = 11
FILE_POSITIONING_FAILURE = 12
FILE_ALREADY_EXIST = 13
FILE_NOT_FOUND = 14
FILE_TYPE_MISMATCH = 15
FILE_CASE_MISMATCH = 16
FILE_DUPLICATED = 17
FILE_UNKNOWN_ERROR = 0x0FFF
OPTION_UNKNOWN = 0x1000
OPTION_MISSING = 0x1001
OPTION_CONFLICT = 0x1002
OPTION_VALUE_INVALID = 0x1003
OPTION_DEPRECATED = 0x1004
OPTION_NOT_SUPPORTED = 0x1005
OPTION_UNKNOWN_ERROR = 0x1FFF
PARAMETER_INVALID = 0x2000
PARAMETER_MISSING = 0x2001
PARAMETER_UNKNOWN_ERROR = 0x2FFF
FORMAT_INVALID = 0x3000
FORMAT_NOT_SUPPORTED = 0x3001
FORMAT_UNKNOWN = 0x3002
FORMAT_UNKNOWN_ERROR = 0x3FFF
RESOURCE_NOT_AVAILABLE = 0x4000
RESOURCE_ALLOCATE_FAILURE = 0x4001
RESOURCE_FULL = 0x4002
RESOURCE_OVERFLOW = 0x4003
RESOURCE_UNDERRUN = 0x4004
RESOURCE_UNKNOWN_ERROR = 0x4FFF
ATTRIBUTE_NOT_AVAILABLE = 0x5000
ATTRIBUTE_GET_FAILURE = 0x5001
ATTRIBUTE_SET_FAILURE = 0x5002
ATTRIBUTE_UPDATE_FAILURE = 0x5003
ATTRIBUTE_ACCESS_DENIED = 0x5004
ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF
IO_NOT_READY = 0x6000
IO_BUSY = 0x6001
IO_TIMEOUT = 0x6002
IO_UNKNOWN_ERROR = 0x6FFF
COMMAND_FAILURE = 0x7000
PERMISSION_FAILURE = 0x8000
CODE_ERROR = 0xC0DE
AUTOGEN_ERROR = 0xF000
PARSER_ERROR = 0xF001
BUILD_ERROR = 0xF002
GENFDS_ERROR = 0xF003
ECC_ERROR = 0xF004
EOT_ERROR = 0xF005
DDC_ERROR = 0xF009
WARNING_AS_ERROR = 0xF006
MIGRATION_ERROR = 0xF010
PCD_VALIDATION_INFO_ERROR = 0xF011
PCD_VARIABLE_ATTRIBUTES_ERROR = 0xF012
PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR = 0xF013
ABORT_ERROR = 0xFFFE
UNKNOWN_ERROR = 0xFFFF
## Error message of each error code
gErrorMessage = {
FILE_NOT_FOUND : "File/directory not found in workspace",
FILE_OPEN_FAILURE : "File open failure",
FILE_WRITE_FAILURE : "File write failure",
FILE_PARSE_FAILURE : "File parse failure",
FILE_READ_FAILURE : "File read failure",
FILE_CREATE_FAILURE : "File create failure",
FILE_CHECKSUM_FAILURE : "Invalid checksum of file",
FILE_COMPRESS_FAILURE : "File compress failure",
FILE_DECOMPRESS_FAILURE : "File decompress failure",
FILE_MOVE_FAILURE : "File move failure",
FILE_DELETE_FAILURE : "File delete failure",
FILE_COPY_FAILURE : "File copy failure",
FILE_POSITIONING_FAILURE: "Failed to seeking position",
FILE_ALREADY_EXIST : "File or directory already exists",
FILE_TYPE_MISMATCH : "Incorrect file type",
FILE_CASE_MISMATCH : "File name case mismatch",
FILE_DUPLICATED : "Duplicated file found",
FILE_UNKNOWN_ERROR : "Unknown error encountered on file",
OPTION_UNKNOWN : "Unknown option",
OPTION_MISSING : "Missing option",
OPTION_CONFLICT : "Conflict options",
OPTION_VALUE_INVALID : "Invalid value of option",
OPTION_DEPRECATED : "Deprecated option",
OPTION_NOT_SUPPORTED : "Unsupported option",
OPTION_UNKNOWN_ERROR : "Unknown error when processing options",
PARAMETER_INVALID : "Invalid parameter",
PARAMETER_MISSING : "Missing parameter",
PARAMETER_UNKNOWN_ERROR : "Unknown error in parameters",
FORMAT_INVALID : "Invalid syntax/format",
FORMAT_NOT_SUPPORTED : "Not supported syntax/format",
FORMAT_UNKNOWN : "Unknown format",
FORMAT_UNKNOWN_ERROR : "Unknown error in syntax/format ",
RESOURCE_NOT_AVAILABLE : "Not available",
RESOURCE_ALLOCATE_FAILURE : "Allocate failure",
RESOURCE_FULL : "Full",
RESOURCE_OVERFLOW : "Overflow",
RESOURCE_UNDERRUN : "Underrun",
RESOURCE_UNKNOWN_ERROR : "Unknown error",
ATTRIBUTE_NOT_AVAILABLE : "Not available",
ATTRIBUTE_GET_FAILURE : "Failed to retrieve",
ATTRIBUTE_SET_FAILURE : "Failed to set",
ATTRIBUTE_UPDATE_FAILURE: "Failed to update",
ATTRIBUTE_ACCESS_DENIED : "Access denied",
ATTRIBUTE_UNKNOWN_ERROR : "Unknown error when accessing",
COMMAND_FAILURE : "Failed to execute command",
IO_NOT_READY : "Not ready",
IO_BUSY : "Busy",
IO_TIMEOUT : "Timeout",
IO_UNKNOWN_ERROR : "Unknown error in IO operation",
UNKNOWN_ERROR : "Unknown error",
}
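# Illustrative helper (an assumption, not part of the original module): resolve
# an error code to its message, falling back to the generic unknown-error text.
def lookup_error_message(error_code):
    return gErrorMessage.get(error_code, gErrorMessage[UNKNOWN_ERROR])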
## Exception indicating a fatal error
if __name__ == "__main__":
pass
| 33.886076 | 85 | 0.699664 |
0a3943aef4b92eda2997e8228a72ccdd4b255c3d
| 1,360 |
py
|
Python
|
datasets/SUN397EncodbleDataset.py
|
allenai/ViRB
|
fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b
|
[
"Apache-2.0"
] | 26 |
2021-05-19T13:49:53.000Z
|
2022-02-10T16:33:47.000Z
|
datasets/SUN397EncodbleDataset.py
|
allenai/ViRB
|
fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b
|
[
"Apache-2.0"
] | null | null | null |
datasets/SUN397EncodbleDataset.py
|
allenai/ViRB
|
fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b
|
[
"Apache-2.0"
] | 1 |
2021-06-07T02:55:30.000Z
|
2021-06-07T02:55:30.000Z
|
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
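# Hypothetical sketch only: the class body is not shown above, so the name,
# directory layout, and transform below are illustrative assumptions built from
# the imports, not the project's actual implementation.
class SUN397Sketch(Dataset):
    def __init__(self, root, image_size=224):
        # Scan the (assumed) SUN397 root directory for jpg images.
        self.paths = sorted(glob.glob(root + "/**/*.jpg", recursive=True))
        self.transform = transforms.Compose([
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        img = Image.open(self.paths[idx]).convert("RGB")
        return self.transform(img)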
| 34 | 97 | 0.625735 |
0a3a46f51a8f874a867b535822da740830faf6e6
| 966 |
py
|
Python
|
cybox/common/location.py
|
tirkarthi/python-cybox
|
a378deb68b3ac56360c5cc35ff5aad1cd3dcab83
|
[
"BSD-3-Clause"
] | 40 |
2015-03-05T18:22:51.000Z
|
2022-03-06T07:29:25.000Z
|
cybox/common/location.py
|
tirkarthi/python-cybox
|
a378deb68b3ac56360c5cc35ff5aad1cd3dcab83
|
[
"BSD-3-Clause"
] | 106 |
2015-01-12T18:52:20.000Z
|
2021-04-25T22:57:52.000Z
|
cybox/common/location.py
|
tirkarthi/python-cybox
|
a378deb68b3ac56360c5cc35ff5aad1cd3dcab83
|
[
"BSD-3-Clause"
] | 30 |
2015-03-25T07:24:40.000Z
|
2021-07-23T17:10:11.000Z
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields
import cybox
import cybox.bindings.cybox_common as common_binding
| 26.108108 | 65 | 0.704969 |
0a3be6996ac9517d3022400855065d32ff7ed3c0
| 1,359 |
py
|
Python
|
scripts/bam-stats.py
|
varlociraptor/prosic-evaluation
|
f4f1950ba5c10bda0f41df2a8f519d98f779d736
|
[
"MIT"
] | 2 |
2020-04-29T00:56:09.000Z
|
2021-03-07T19:59:06.000Z
|
scripts/bam-stats.py
|
varlociraptor/prosic-evaluation
|
f4f1950ba5c10bda0f41df2a8f519d98f779d736
|
[
"MIT"
] | null | null | null |
scripts/bam-stats.py
|
varlociraptor/prosic-evaluation
|
f4f1950ba5c10bda0f41df2a8f519d98f779d736
|
[
"MIT"
] | 1 |
2022-03-15T12:23:03.000Z
|
2022-03-15T12:23:03.000Z
|
#!/usr/bin/env python
import sys
import numpy as np
import pandas as pd
import pysam
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import seaborn as sns
from functools import partial
tumor = pysam.AlignmentFile(snakemake.input[0], "rb")
normal = pysam.AlignmentFile(snakemake.input[1], "rb")
softclips = []
for i, rec in enumerate(normal):
if rec.is_supplementary or rec.is_unmapped:
continue
is_first_read = rec.pos < rec.mpos
get_clip = lambda c: c[1] if c[0] == 4 else None
clip_left = get_clip(rec.cigartuples[0])
if clip_left is not None:
softclips.append([clip_left, True, is_first_read])
clip_right = get_clip(rec.cigartuples[-1])
if clip_right is not None:
softclips.append([clip_right, False, is_first_read])
if i == 10000000:
break
softclips = pd.DataFrame(softclips, columns=["len", "left", "first_in_pair"])
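# `plot` is mapped over the FacetGrid below; this minimal histogram version is
# an assumed stand-in for illustration, not the script's original definition.
def plot(lengths, **kwargs):
    plt.hist(lengths, bins=50)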
g = sns.FacetGrid(softclips, col="left", row="first_in_pair")
g = g.map(plot, "len")
plt.savefig(snakemake.output[0])
| 28.3125 | 92 | 0.675497 |
0a3bec6c960ec5a80b8e4e32d4669b80255b605f
| 1,114 |
py
|
Python
|
app/rss_feeder_api/migrations/0003_auto_20200813_1623.py
|
RSaab/rss-scraper
|
9bf608878e7d08fea6508ae90b27f1c226b313f1
|
[
"MIT"
] | null | null | null |
app/rss_feeder_api/migrations/0003_auto_20200813_1623.py
|
RSaab/rss-scraper
|
9bf608878e7d08fea6508ae90b27f1c226b313f1
|
[
"MIT"
] | null | null | null |
app/rss_feeder_api/migrations/0003_auto_20200813_1623.py
|
RSaab/rss-scraper
|
9bf608878e7d08fea6508ae90b27f1c226b313f1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1 on 2020-08-13 16:23
from django.db import migrations, models
import django.utils.timezone
| 29.315789 | 107 | 0.577199 |
0a3c1af48960fabf760e667011b0450023e75e10
| 4,849 |
py
|
Python
|
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py
|
MetaMain/BewareAdvML
|
52d489b565b0df36cb588b5709c29c2e8e4d3f49
|
[
"BSD-3-Clause"
] | 1 |
2022-03-25T07:53:13.000Z
|
2022-03-25T07:53:13.000Z
|
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py
|
MetaMain/BewareAdvML
|
52d489b565b0df36cb588b5709c29c2e8e4d3f49
|
[
"BSD-3-Clause"
] | null | null | null |
AdversarialSampleGeneratorV11/AdversarialSampleGeneratorV11/ResNetConstructor.py
|
MetaMain/BewareAdvML
|
52d489b565b0df36cb588b5709c29c2e8e4d3f49
|
[
"BSD-3-Clause"
] | null | null | null |
import tensorflow
from tensorflow import keras
Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization = keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input = keras.layers.Input
l2 = keras.regularizers.l2
from tensorflow.keras import backend
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
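# Minimal usage sketch (not part of the original file): stack two resnet_layer
# calls into a tiny functional model. Input shape, filter counts, and the class
# count are illustrative assumptions.
if __name__ == "__main__":
    inputs = Input(shape=(32, 32, 3))
    x = resnet_layer(inputs, num_filters=16)
    x = resnet_layer(x, num_filters=16, activation=None)
    x = Flatten()(x)
    outputs = Dense(10, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.summary()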
| 37.589147 | 80 | 0.545061 |
0a3cda3b610042fefd30969a702f9d925c74876f
| 4,421 |
py
|
Python
|
ttl2json.py
|
the-norman-sicily-project/genealogical-trees
|
32fa4f25861ae34543b0a6b95e54842c0018331b
|
[
"MIT"
] | 1 |
2021-05-18T20:39:30.000Z
|
2021-05-18T20:39:30.000Z
|
ttl2json.py
|
the-norman-sicily-project/genealogical-trees
|
32fa4f25861ae34543b0a6b95e54842c0018331b
|
[
"MIT"
] | null | null | null |
ttl2json.py
|
the-norman-sicily-project/genealogical-trees
|
32fa4f25861ae34543b0a6b95e54842c0018331b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import json
import rdflib
import rdflib.plugins.sparql as sparql
RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
'isGreatAuntOf', 'hasGreatAunt',
'isBrotherOf', 'isSisterOf', 'isSiblingOf',
'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER
try:
workpath = sys.argv[1]
except IndexError:
sys.exit("No path defined!")
try:
recursion_limit = int(sys.argv[2])
except IndexError:
recursion_limit = 0
if recursion_limit > 0:
sys.setrecursionlimit(recursion_limit)
g = rdflib.Graph()
g.parse(workpath, format="turtle")
fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)
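# `dump` is used in the queries below but not defined in this excerpt; this is
# an assumed minimal version that strips the FHKB/schema.org namespace prefix
# from a term so it can serve as a plain node or edge identifier.
def dump(term):
    text = str(term)
    for prefix in (fhkb_str, schema_str):
        if text.startswith(prefix):
            return text[len(prefix):]
    return text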
graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}
q = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
}
ORDER BY ?person""")
for rel in RELS_OF_INTEREST:
pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
relation_query_results = g.query(q, initBindings={'pred': pred})
for (subj, pred, obj) in relation_query_results:
graph['edges'].append(
{
'data': {
'group': 'edges',
'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
'source': dump(subj),
'target': dump(obj),
'type': dump(pred)
}
})
q_details = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
FILTER NOT EXISTS {
?person ?testPred ?obj .
VALUES ?testPred {
fhkb:isWifeOf
fhkb:isMotherOf
fhkb:isFatherOf
fhkb:isHusbandOf
fhkb:isSpouseOf
fhkb:hasGrandParent
fhkb:isGrandParentOf
fhkb:hasGreatGrandParent
fhkb:isGreatGrandParentOf
fhkb:isUncleOf
fhkb:hasUncle
fhkb:isGreatUncleOf
fhkb:hasGreatUncle
fhkb:isAuntOf
fhkb:hasAunt
fhkb:isGreatAuntOf
fhkb:hasGreatAunt
fhkb:isBrotherOf
fhkb:isSisterOf
fhkb:isSiblingOf
fhkb:isFirstCousinOf
fhkb:isSecondCousinOf
fhkb:isThirdCousinOf
fhkb:hasRelation
fhkb:isPartnerIn
fhkb:isMalePartnerIn
fhkb:isFemalePartnerIn
fhkb:isBloodrelationOf
}
}
}
ORDER BY ?person"""
)
person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
node = nodes.get(dump(subj), {
'data': {
'label': '',
'degree': 0,
'size': 10,
'alternateNames': [],
'honorificPrefixes': [],
'honorificSuffixes': [],
'images': [],
'id': dump(subj),
}})
if pred == FHKB.Sex:
node['data'][dump(pred)] = dump(obj)
elif pred.startswith(SCHEMA_ORG):
if dump(pred) == 'honorificSuffix':
node['data']['honorificSuffixes'].append(obj)
elif dump(pred) == 'honorificPrefix':
node['data']['honorificPrefixes'].append(obj)
elif dump(pred) == 'alternateName':
node['data']['alternateNames'].append(obj)
elif dump(pred) == 'image':
node['data']['images'].append(obj)
else:
node['data'][dump(pred)] = obj
elif pred == rdflib.RDFS.label:
node['data']['label'] = obj
else:
continue
nodes[dump(subj)] = node
graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
| 28.339744 | 84 | 0.555078 |
0a3d017dc9b9c85df909d024333ec6af657c45e5
| 53,871 |
py
|
Python
|
tests/rest/client/test_login.py
|
BearerPipelineTest/synapse-1
|
78b99de7c206b106340e12cdee0af9aa246bd5ad
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/client/test_login.py
|
BearerPipelineTest/synapse-1
|
78b99de7c206b106340e12cdee0af9aa246bd5ad
|
[
"Apache-2.0"
] | null | null | null |
tests/rest/client/test_login.py
|
BearerPipelineTest/synapse-1
|
78b99de7c206b106340e12cdee0af9aa246bd5ad
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock
from urllib.parse import urlencode
import pymacaroons
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
import synapse.rest.admin
from synapse.appservice import ApplicationService
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.types import create_requester
from synapse.util import Clock
from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
from tests.handlers.test_saml import has_saml2
from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless
try:
import jwt
HAS_JWT = True
except ImportError:
HAS_JWT = False
# synapse server name: used to populate public_baseurl in some tests
SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"
# public_baseurl for some tests. It uses an http:// scheme because
# FakeChannel.isSecure() returns False, so synapse will see the requested uri as
# http://..., so using http in the public_baseurl stops Synapse trying to redirect to
# https://....
BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
# CAS server used in some tests
CAS_SERVER = "https://fake.test"
# just enough to tell pysaml2 where to redirect to
SAML_SERVER = "https://test.saml.server/idp/sso"
TEST_SAML_METADATA = """
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata">
<md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
<md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/>
</md:IDPSSODescriptor>
</md:EntityDescriptor>
""" % {
"SAML_SERVER": SAML_SERVER,
}
LOGIN_URL = b"/_matrix/client/r0/login"
TEST_URL = b"/_matrix/client/r0/account/whoami"
# a (valid) url with some annoying characters in. %3D is =, %26 is &, %2B is +
TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="f%26=o"'
# the query params in TEST_CLIENT_REDIRECT_URL
EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("<ab c>", ""), ('q" =+"', '"f&=o"')]
# (possibly experimental) login flows we expect to appear in the list after the normal
# ones
ADDITIONAL_LOGIN_FLOWS = [
{"type": "m.login.application_service"},
{"type": "uk.half-shot.msc2778.login.application_service"},
]
def test_login_with_overly_long_device_id_fails(self) -> None:
self.register_user("mickey", "cheese")
# create a device_id longer than 512 characters
device_id = "yolo" * 512
body = {
"type": "m.login.password",
"user": "mickey",
"password": "cheese",
"device_id": device_id,
}
# make a login request with the bad device_id
channel = self.make_request(
"POST",
"/_matrix/client/v3/login",
json.dumps(body).encode("utf8"),
custom_headers=None,
)
# test that the login fails with the correct error code
self.assertEqual(channel.code, 400)
self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")
# The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
# RS256, with a public key configured in synapse as "jwt_secret", and tokens
# signed by the private key.
AS_USER = "as_user_alice"
| 40.443694 | 119 | 0.626218 |
0a3d26451658f18eb6e4d945d41095c7fba3dc44
| 1,683 |
py
|
Python
|
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py
|
Kevinskwk/rmf_demos
|
2d7b9c7c75211b89b91977e5d1a66f440cc5df95
|
[
"Apache-2.0"
] | null | null | null |
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py
|
Kevinskwk/rmf_demos
|
2d7b9c7c75211b89b91977e5d1a66f440cc5df95
|
[
"Apache-2.0"
] | null | null | null |
rmf_demo_tasks/rmf_demo_tasks/request_delivery.py
|
Kevinskwk/rmf_demos
|
2d7b9c7c75211b89b91977e5d1a66f440cc5df95
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from time import sleep
import uuid
import rclpy
from rmf_task_msgs.msg import Delivery
if __name__ == '__main__':
main(sys.argv)
| 27.590164 | 88 | 0.680333 |
0a3d8aa1a0610f6e6749b406310d289569ef5143
| 13,701 |
py
|
Python
|
dis_snek/api/http/http_client.py
|
BoredManCodes/Dis-Snek
|
662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b
|
[
"MIT"
] | null | null | null |
dis_snek/api/http/http_client.py
|
BoredManCodes/Dis-Snek
|
662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b
|
[
"MIT"
] | null | null | null |
dis_snek/api/http/http_client.py
|
BoredManCodes/Dis-Snek
|
662dbc3f86c133fd704c22d3d6d55af5ee1f6f5b
|
[
"MIT"
] | null | null | null |
"""This file handles the interaction with discords http endpoints."""
import asyncio
import logging
from typing import Any, Dict, Optional, Union
from urllib.parse import quote as _uriquote
from weakref import WeakValueDictionary
import aiohttp
from aiohttp import BaseConnector, ClientSession, ClientWebSocketResponse, FormData
from multidict import CIMultiDictProxy
from dis_snek.api.http.http_requests import (
BotRequests,
ChannelRequests,
EmojiRequests,
GuildRequests,
InteractionRequests,
MemberRequests,
MessageRequests,
ReactionRequests,
StickerRequests,
ThreadRequests,
UserRequests,
WebhookRequests,
ScheduledEventsRequests,
)
from dis_snek.client.const import __py_version__, __repo_url__, __version__, logger_name, MISSING, Absent
from dis_snek.client.errors import DiscordError, Forbidden, GatewayNotFound, HTTPException, NotFound, LoginError
from dis_snek.client.utils.input_utils import response_decode
from dis_snek.client.utils.serializer import dict_filter_missing
from dis_snek.models import CooldownSystem
from .route import Route
__all__ = ["HTTPClient"]
log = logging.getLogger(logger_name)
| 38.485955 | 160 | 0.593387 |
0a3e0181aa1152f21b25e174598bc71d6679ab76
| 986 |
py
|
Python
|
config.py
|
conradsuuna/uac-computer-competency
|
40f8b165e5432ca22ab97838f424e26650a3d300
|
[
"MIT"
] | null | null | null |
config.py
|
conradsuuna/uac-computer-competency
|
40f8b165e5432ca22ab97838f424e26650a3d300
|
[
"MIT"
] | null | null | null |
config.py
|
conradsuuna/uac-computer-competency
|
40f8b165e5432ca22ab97838f424e26650a3d300
|
[
"MIT"
] | null | null | null |
from os import environ
import psycopg2
from datetime import timedelta
from dotenv import load_dotenv
load_dotenv()
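# Hypothetical sketch of what typically follows this preamble; the setting names
# and values below are assumptions for illustration, not the project's actual
# configuration.
class Config:
    SECRET_KEY = environ.get("SECRET_KEY")
    SQLALCHEMY_DATABASE_URI = environ.get("DATABASE_URL")
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)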
| 27.388889 | 80 | 0.720081 |
0a3e6de6fa0adef7035c5c9d0aedbcc9e7f13b79
| 791 |
py
|
Python
|
electrum/version.py
|
c4pt000/electrum-radiocoin
|
7cb5f618a9aa8cd03d60191624a0e57cc24646d2
|
[
"MIT"
] | null | null | null |
electrum/version.py
|
c4pt000/electrum-radiocoin
|
7cb5f618a9aa8cd03d60191624a0e57cc24646d2
|
[
"MIT"
] | null | null | null |
electrum/version.py
|
c4pt000/electrum-radiocoin
|
7cb5f618a9aa8cd03d60191624a0e57cc24646d2
|
[
"MIT"
] | null | null | null |
ELECTRUM_VERSION = '4.1.5-radc' # version of the client package
APK_VERSION = '4.1.5.0' # read by buildozer.spec
PROTOCOL_VERSION = '1.4' # protocol version requested
# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01' # Standard wallet
SEED_PREFIX_SW = '100' # Segwit wallet
SEED_PREFIX_2FA = '101' # Two-factor authentication
SEED_PREFIX_2FA_SW = '102' # Two-factor auth, using segwit
| 34.391304 | 67 | 0.668774 |
0a3ee6851d0a7ef05afcdf6a271167449fef0269
| 7,867 |
py
|
Python
|
lib/layers/functions/prior_box.py
|
arleyzhang/object-detection-pytorch
|
de96a507e6643a7019b94d92f77219439ccca29f
|
[
"MIT"
] | 4 |
2018-10-10T03:06:38.000Z
|
2018-12-18T07:32:30.000Z
|
lib/layers/functions/prior_box.py
|
arleyzhang/object-detection-pytorch
|
de96a507e6643a7019b94d92f77219439ccca29f
|
[
"MIT"
] | null | null | null |
lib/layers/functions/prior_box.py
|
arleyzhang/object-detection-pytorch
|
de96a507e6643a7019b94d92f77219439ccca29f
|
[
"MIT"
] | 1 |
2018-10-10T03:06:39.000Z
|
2018-10-10T03:06:39.000Z
|
from __future__ import division
from math import sqrt as sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter
def vis(func):
    """TensorBoard visualization when the caller provides a writer."""
    def wrapper(*args, **kwargs):
        # Pass through to the wrapped function (visualization hook point).
        return func(*args, **kwargs)
    return wrapper
class PriorBoxSSD(PriorBoxBase):
# PriorBox = PriorBoxSSD
if __name__ == '__main__':
import copy
# from lib.datasets.config import ssd_voc_vgg as cfg
# from lib.utils.visualize_utils import TBWriter
# tb_writer = TBWriter(log_dir, {'epoch': 50})
#
# test_no_vis(cfg, tb_writer)
# test_filp(cfg, tb_writer)
# test_rectangle(cfg, tb_writer)
print('haha')
from lib.utils.config import cfg
print(cfg)
| 38.004831 | 104 | 0.583196 |
0a3fe7e6abe2393d5617b3058cbf7b54468e33ee
| 5,073 |
py
|
Python
|
python/Gaffer/SequencePath.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
python/Gaffer/SequencePath.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
python/Gaffer/SequencePath.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
| 27.721311 | 133 | 0.668244 |
0a4049bea9cce33edfb9f0362df0cd2e91b7aa1a
| 335 |
py
|
Python
|
reo/migrations/0121_merge_20211001_1841.py
|
NREL/REopt_API
|
fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6
|
[
"BSD-3-Clause"
] | 7 |
2022-01-29T12:10:10.000Z
|
2022-03-28T13:45:20.000Z
|
reo/migrations/0121_merge_20211001_1841.py
|
NREL/reopt_api
|
fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6
|
[
"BSD-3-Clause"
] | 12 |
2022-02-01T18:23:18.000Z
|
2022-03-31T17:22:17.000Z
|
reo/migrations/0121_merge_20211001_1841.py
|
NREL/REopt_API
|
fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6
|
[
"BSD-3-Clause"
] | 3 |
2022-02-08T19:44:40.000Z
|
2022-03-12T11:05:36.000Z
|
# Generated by Django 3.1.13 on 2021-10-01 18:41
from django.db import migrations
| 20.9375 | 69 | 0.662687 |
0a42c30234b3cb9b1bf3706f896598d1f485e00b
| 7,765 |
py
|
Python
|
PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852 |
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371 |
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240 |
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3
import PhysicsTools.HeppyCore.framework.config as cfg
# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
class_object = TauAnalyzer,
# inclusive very loose hadronic tau selection
inclusive_ptMin = 18,
inclusive_etaMax = 9999,
inclusive_dxyMax = 1000.,
inclusive_dzMax = 0.4,
inclusive_vetoLeptons = False,
inclusive_leptonVetoDR = 0.4,
inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
inclusive_tauID = "decayModeFindingNewDMs",
inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required
inclusive_tauAntiMuonID = "",
inclusive_tauAntiElectronID = "",
# loose hadronic tau selection
loose_ptMin = 18,
loose_etaMax = 9999,
loose_dxyMax = 1000.,
loose_dzMax = 0.2,
loose_vetoLeptons = True,
loose_leptonVetoDR = 0.4,
loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or ""
loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
loose_vetoLeptonsPOG = False, # If True, the following two IDs are required
loose_tauAntiMuonID = "againstMuonLoose3",
loose_tauAntiElectronID = "againstElectronLooseMVA5"
)
)
| 45.145349 | 171 | 0.619446 |
0a42e80075481314be34a3f3fd3ff44396a763e9
| 4,020 |
py
|
Python
|
vize/170401038.py
|
omuryorulmaz/kriptografi
|
04c22e4f05f126b14f41842597a7b27065326670
|
[
"Unlicense"
] | 8 |
2020-04-15T12:06:42.000Z
|
2022-01-21T10:35:51.000Z
|
vize/170401038.py
|
omuryorulmaz/kriptografi
|
04c22e4f05f126b14f41842597a7b27065326670
|
[
"Unlicense"
] | 3 |
2020-05-13T20:41:27.000Z
|
2020-06-11T00:45:27.000Z
|
vize/170401038.py
|
omuryorulmaz/kriptografi
|
04c22e4f05f126b14f41842597a7b27065326670
|
[
"Unlicense"
] | 54 |
2020-04-23T14:58:50.000Z
|
2020-06-26T06:00:32.000Z
|
# İsmail ALTAY 170401038
import math
import random
r = 3271
n = int(input("Enter the bit length of the key pairs to be generated: "))
Keygen(n)
encrypt("plaintext.txt","publickey.txt")
decrypt("ciphertext.txt", "privatekey.txt")
| 26.447368 | 122 | 0.510945 |
0a42fad82c7026120ddbfdc222f7f45f5ba001fc
| 8,219 |
py
|
Python
|
seqenv/ontology.py
|
xapple/seqenv
|
a898b936b64b51340f439b05fc8909f4ed826247
|
[
"MIT"
] | 7 |
2016-12-02T09:28:00.000Z
|
2021-11-04T13:47:16.000Z
|
seqenv/ontology.py
|
xapple/seqenv
|
a898b936b64b51340f439b05fc8909f4ed826247
|
[
"MIT"
] | 7 |
2016-04-07T17:00:50.000Z
|
2018-05-14T12:16:06.000Z
|
seqenv/ontology.py
|
xapple/seqenv
|
a898b936b64b51340f439b05fc8909f4ed826247
|
[
"MIT"
] | 4 |
2016-03-15T16:41:12.000Z
|
2021-12-06T09:30:35.000Z
|
# Built-in modules #
# Internal modules #
from seqenv import module_dir
from seqenv.common.cache import property_cached
# Third party modules #
import sh, networkx
import matplotlib.colors
# A list of envos to help test this module #
test_envos = [
"ENVO:00000033",
"ENVO:00000043",
"ENVO:00000067",
"ENVO:00000143",
"ENVO:00000210",
"ENVO:00000215",
"ENVO:00000475",
]
################################################################################
def add_weights(self, g, weights=None):
"""Input a networkx DiGraph object.
Outputs a pygraphviz AGraph object."""
g = networkx.nx_agraph.to_agraph(g)
if weights is None: return g
for envo in weights:
node = g.get_node(envo)
weight = weights[envo]
color = matplotlib.colors.rgb2hex((1.0, 1.0 - weight, 0.0))
node.attr['fillcolor'] = color
return g
def add_style(self, g):
"""Input a pygraphviz AGraph object.
Outputs a pygraphviz AGraph object."""
for node in g.nodes():
text = node.attr['name']
node.attr['label'] = text.replace(' ','\\n')
node.attr['name'] = ''
node.attr['shape'] = 'Mrecord'
node.attr['style'] = 'filled'
# To add the envo id to each node, uncomment:
#envo = node.attr['label']
#node.attr['label'] = "{<f0> %s|<f1> %s}" % (envo, text)
for edge in g.edges():
if edge.attr['label'] == 'located_in': edge.attr['color'] = 'turquoise4'
edge.attr['label'] = ''
return g
def write_to_dot(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle: handle.write(g.to_string())
def add_legend(self, path):
"""Input the path to a dot file."""
legend_txt = """
digraph {
rankdir=LR
node [shape=plaintext,fontname="helvetica"]
subgraph cluster_01 {
label = "NB: darker nodes weigh more";
key [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td align="right" port="i1">Is</td></tr>
<tr><td align="right" port="i2">Part</td></tr>
<tr><td align="right" port="i3">Located</td></tr>
</table>>];
key2 [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td port="i1">a</td></tr>
<tr><td port="i2">of</td></tr>
<tr><td port="i3">in</td></tr>
</table>>];
key:i1:e -> key2:i1:w [color=red];
key:i2:e -> key2:i2:w [color=blue];
key:i3:e -> key2:i3:w [color=turquoise4];
}"""
orig_txt = [line.rstrip('\n') for line in open(path, 'r') if line]
new_text = [line.lstrip() for line in legend_txt.split('\n') if line]
new_text = '\n'.join(new_text + orig_txt[2:])
with open(path, 'w') as handle: handle.write(new_text)
def draw_to_pdf(self, in_path, out_path):
"""Input a path to a dot file."""
sh.dot(in_path, '-Tpdf', '-o', out_path)
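    # Usage sketch (illustrative; the enclosing class is not shown here, so
    # `onto` is an assumed instance of it):
    #
    #   weights = {envo: 0.5 for envo in test_envos}
    #   g = onto.add_weights(onto.networkx, weights)
    #   g = onto.add_style(g)
    #   onto.write_to_dot(g, 'envo.dot')
    #   onto.add_legend('envo.dot')
    #   onto.draw_to_pdf('envo.dot', 'envo.pdf')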
# --------------------------- In this section --------------------------- #
# descends
def descends(self, e, root):
"""Does the envo term `e` descend from the node `root`?
Returns True or False."""
# Auto conversion #
if isinstance(e, int): e = "ENVO:%08d" % e
if isinstance(root, int): root = "ENVO:%08d" % root
# Return #
return e in networkx.ancestors(self.networkx, root)
# --------------------------- In this section --------------------------- #
# print_test
# draw_with_networkx
# draw_with_pygraphviz
def print_test(self, e=None):
"""Just a method to see a bit how the different libraries work."""
# Test node #
if e is None: e = test_envos[0]
# Goa #
print "Goa: "
print self.goatools[e]
# Pygraphviz #
print "pygraphviz: "
print self.pygraphviz[e]
print self.pygraphviz.successors(e)
print self.pygraphviz.predecessors(e)
print self.pygraphviz.get_node(e)
# Networkx #
import networkx
print "networkx: "
print self.networkx[e]
print self.networkx.successors(e)
print self.networkx.predecessors(e)
print networkx.ancestors(self.networkx, e) # same as predecessors
print networkx.descendants(self.networkx, e) # almost as child_to_parents
def draw_with_networkx(self, g, path):
"""Input a networkx DiGraph object."""
from matplotlib import pyplot
networkx.draw(g)
pyplot.savefig(path)
pyplot.close()
def draw_with_pygraphviz(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle:
handle.write(g.to_string())
| 35.426724 | 90 | 0.565397 |
0a433c84b0dfa57ea11f80f51e65908aaa8c4377
| 87,582 |
py
|
Python
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 1 |
2020-03-05T18:10:35.000Z
|
2020-03-05T18:10:35.000Z
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 2 |
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_11_04/models/_models.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
| 36.706622 | 283 | 0.650042 |
0a435a4c25f9daef6a9569a6a1c22b40cc97a64d
| 18,579 |
py
|
Python
|
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
|
cognifloyd/st2-open-rbac
|
fb3251223743e497267277fe9f5cef91f41ade34
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
|
cognifloyd/st2-open-rbac
|
fb3251223743e497267277fe9f5cef91f41ade34
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/controllers/v1/test_rbac_for_supported_st2api_endpoints.py
|
cognifloyd/st2-open-rbac
|
fb3251223743e497267277fe9f5cef91f41ade34
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import six
import mock
from st2common.services import triggers as trigger_service
with mock.patch.object(trigger_service, 'create_trigger_type_db', mock.MagicMock()):
from st2api.controllers.v1.webhooks import HooksHolder
from st2common.persistence.rbac import UserRoleAssignment
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.service_setup import register_service_in_service_registry
from st2common.services import coordination
from st2tests import config as tests_config
from st2tests.fixturesloader import FixturesLoader
from open_rbac.tests import APIControllerWithRBACTestCase
from tests.unit.controllers.v1.test_webhooks import DUMMY_TRIGGER_DICT
http_client = six.moves.http_client
__all__ = [
'APIControllersRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = OrderedDict([
('runners', ['testrunner1.yaml', 'run-local.yaml']),
('sensors', ['sensor1.yaml']),
('actions', ['action1.yaml', 'local.yaml']),
('aliases', ['alias1.yaml']),
('triggers', ['trigger1.yaml', 'cron1.yaml']),
('rules', ['rule1.yaml']),
('triggertypes', ['triggertype1.yaml']),
('executions', ['execution1.yaml']),
('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']),
('enforcements', ['enforcement1.yaml']),
('apikeys', ['apikey1.yaml']),
('traces', ['trace_for_test_enforce.yaml'])
])
MOCK_RUNNER_1 = {
'name': 'test-runner-1',
'description': 'test',
'enabled': False
}
MOCK_ACTION_1 = {
'name': 'ma.dummy.action',
'pack': 'examples',
'description': 'test description',
'enabled': True,
'entry_point': '/tmp/test/action2.py',
'runner_type': 'local-shell-script',
'parameters': {
'c': {'type': 'string', 'default': 'C1', 'position': 0},
'd': {'type': 'string', 'default': 'D1', 'immutable': True}
}
}
MOCK_ACTION_ALIAS_1 = {
'name': 'alias3',
'pack': 'aliases',
'description': 'test description',
'action_ref': 'core.local',
'formats': ['a', 'b']
}
MOCK_RULE_1 = {
'enabled': True,
'name': 'st2.test.rule2',
'pack': 'yoyohoneysingh',
'trigger': {
'type': 'wolfpack.triggertype-1'
},
'criteria': {
'trigger.k1': {
'pattern': 't1_p_v',
'type': 'equals'
}
},
'action': {
'ref': 'sixpack.st2.test.action',
'parameters': {
'ip2': '{{rule.k1}}',
'ip1': '{{trigger.t1_p}}'
}
},
'description': ''
}
def test_icon_png_file_is_whitelisted(self):
self.use_user(self.users['no_permissions'])
# Test that access to icon.png file doesn't require any permissions
response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png')
self.assertEqual(response.status_code, http_client.OK)
# Other files should return forbidden
response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml',
expect_errors=True)
self.assertEqual(response.status_code, http_client.FORBIDDEN)
def _perform_request_for_endpoint(self, endpoint):
if endpoint['method'] == 'GET':
response = self.app.get(endpoint['path'], expect_errors=True)
elif endpoint['method'] == 'POST':
return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'PUT':
return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True)
elif endpoint['method'] == 'DELETE':
return self.app.delete(endpoint['path'], expect_errors=True)
else:
raise ValueError('Unsupported method: %s' % (endpoint['method']))
return response
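# Example endpoint descriptor (an illustrative assumption) in the shape the
# dispatch helper above expects: a path, an HTTP method, and a JSON payload.
EXAMPLE_ENDPOINT = {
    'path': '/v1/actionalias',
    'method': 'POST',
    'payload': MOCK_ACTION_ALIAS_1,
}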
| 33.78 | 98 | 0.466171 |
0a444a2a9b00c93ede978edd61b59c20a6608e93
| 5,351 |
py
|
Python
|
testing/scripts/test_ksonnet_single_namespace.py
|
dtrawins/seldon-core
|
3d8b3791b343118953757a1e787e5919cc64e697
|
[
"Apache-2.0"
] | null | null | null |
testing/scripts/test_ksonnet_single_namespace.py
|
dtrawins/seldon-core
|
3d8b3791b343118953757a1e787e5919cc64e697
|
[
"Apache-2.0"
] | null | null | null |
testing/scripts/test_ksonnet_single_namespace.py
|
dtrawins/seldon-core
|
3d8b3791b343118953757a1e787e5919cc64e697
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import time
import subprocess
from subprocess import run, Popen
from seldon_utils import *
from k8s_utils import *
| 50.481132 | 245 | 0.657073 |
0a44501273cac10db9558b11e30ff5a413b1857f
| 89 |
py
|
Python
|
enthought/envisage/safeweakref.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3 |
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/envisage/safeweakref.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1 |
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/envisage/safeweakref.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from envisage.safeweakref import *
| 22.25 | 38 | 0.842697 |
0a448d09286de882fe626777f47593a108a44caa
| 628 |
py
|
Python
|
test_app/models.py
|
alissonmuller/django-group-by
|
645c36ad2c3ab1f4691de6fcc04fed8b5d7ef78d
|
[
"MIT"
] | 25 |
2016-09-29T15:25:16.000Z
|
2021-09-19T14:20:58.000Z
|
test_app/models.py
|
alissonmuller/django-group-by
|
645c36ad2c3ab1f4691de6fcc04fed8b5d7ef78d
|
[
"MIT"
] | 22 |
2016-05-29T00:14:47.000Z
|
2019-06-08T13:24:21.000Z
|
test_app/models.py
|
alissonmuller/django-group-by
|
645c36ad2c3ab1f4691de6fcc04fed8b5d7ef78d
|
[
"MIT"
] | 2 |
2018-09-24T07:28:39.000Z
|
2019-02-12T14:09:18.000Z
|
from django.db import models
from .query import BookQuerySet
| 23.259259 | 56 | 0.732484 |
0a4491bed67c4627a06dabc6e88940ee8f57226d
| 14,777 |
py
|
Python
|
ResNet/dropblock.py
|
whj363636/CamDrop
|
f8af8c200665145f112b59348f60fc4cf80f04ec
|
[
"MIT"
] | null | null | null |
ResNet/dropblock.py
|
whj363636/CamDrop
|
f8af8c200665145f112b59348f60fc4cf80f04ec
|
[
"MIT"
] | null | null | null |
ResNet/dropblock.py
|
whj363636/CamDrop
|
f8af8c200665145f112b59348f60fc4cf80f04ec
|
[
"MIT"
] | 1 |
2021-11-06T11:22:49.000Z
|
2021-11-06T11:22:49.000Z
|
# -*- coding: utf-8 -*-
# File: dropblock.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
# from tensorpack.tfutils.compat import tfv1 as tf # this should be avoided first in model code
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.models import GlobalAvgPooling, FullyConnected
import tensorflow as tf
__all__ = ['dropblock', 'dropblock2','dropblock3','dropblock4'] # 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout
def dropblock(net, keep_prob, dropblock_size, gap_w=None, label=None, G=None, CG=None, data_format='channels_first'):
"""DropBlock: a regularization method for convolutional neural networks.
DropBlock is a form of structured dropout, where units in a contiguous
region of a feature map are dropped together. DropBlock works better than
dropout on convolutional layers due to the fact that activation units in
convolutional layers are spatially correlated.
See https://arxiv.org/pdf/1810.12890.pdf for details.
Args:
net: `Tensor` input tensor.
is_training: `bool` for whether the model is training.
keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None"
means no DropBlock.
dropblock_size: `int` size of blocks to be dropped by DropBlock.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A version of input tensor with DropBlock applied.
Raises:
    ValueError: if width and height of the input tensor are not equal.
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, _ = net.get_shape().as_list()
else:
_, _, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlcok.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(
valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(
tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
block_pattern, net.dtype)
return net
def dropblock2(net, keep_prob, dropblock_size, G=None, CG=None, data_format='channels_first'):
"""
mimic GN
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
if G == None: G = C // CG
if CG == None: CG = C // G
net = tf.reshape(net, [N, G, CG, height, width])
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlcok.
# seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2)
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0) # for depth
valid_block_center = tf.expand_dims(valid_block_center, 0) # for batch
valid_block_center = tf.expand_dims(valid_block_center, 0) # for channel
randnoise = tf.random_uniform([N, G, 1, width, height], dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(block_pattern, axis=[2, 3, 4], keepdims=True)
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = tf.reduce_max(-block_pattern, reduction_indices=[2])
block_pattern = -tf.nn.max_pool(block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW')
block_pattern = tf.expand_dims(block_pattern, 2)
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
net = tf.reshape(net, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height, width])
return net
def CamDrop(net, keep_prob, dropblock_size, flag=None, label=None, G=None, CG=None, data_format='channels_first'):
'''CamDrop'''
def _get_cam(net, label, flag, dropblock_size, data_format='channels_first'):
'''
net: [N, C, H, W]
gap_w : [gap_C, num_of_class]
'''
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
gap_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'linear/W') if flag > 0 else None
if not gap_w is None:
gap_w = tf.convert_to_tensor(gap_w, tf.float32)
gap_C, num = tf.squeeze(gap_w, 0).get_shape().as_list() # [gap_C, num]
gap_w = tf.reshape(gap_w, [C, gap_C//C, num])
gap_w = tf.reduce_mean(gap_w, reduction_indices=[1]) # [C, num]
label = tf.gather(tf.transpose(gap_w), label) # [N, C]
# spatial
weights = tf.expand_dims(label, 2) # [N, C, 1]
net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
cam = tf.matmul(weights, net, transpose_a=True) # [N, 1, width*height]
# spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True))
# cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width])
# cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW')
# left_or_top = (dropblock_size-1) // 2
# right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1
# cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]])
# cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width])
k = tf.cast(height*width/dropblock_size**2, tf.int32)
topk, _ = tf.math.top_k(cam, k=k) # [N, 1, k]
topk = tf.gather(topk, indices=[k-1], axis=-1) # [N, 1, 1]
spt_mask = (cam < topk)
spt_mask = tf.reshape(spt_mask, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(spt_mask, [N, 1, height, width])
# channel
k = tf.cast(C/8, tf.int32)
topk, _ = tf.math.top_k(label, k=k+1) # [N, k]
topk = tf.gather(topk, indices=k, axis=1) # [N, 1]
topk = tf.expand_dims(topk, 1) # [N, C, 1]
chan_mask = (label < topk)
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1]
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1, 1]
cam_mask = tf.logical_or(spt_mask, chan_mask)
# chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width])
# chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width])
# chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width])
else:
cam_mask = False
return cam_mask
# def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'):
# # Conv layer tensor [?,2048,10,10]
# def _compute_gradients(tensor, var_list):
# grads = tf.gradients(tensor, var_list)
# return [grad if grad is not None else tf.zeros_like(var)
# for var, grad in zip(var_list, grads)]
# # grads = tf.gradients(cost, net)[0]
# if not gap_w is None:
# # Normalizing the gradients
# if data_format == 'channels_last':
# N, height, width, C = net.get_shape().as_list()
# else:
# N, C, height, width = net.get_shape().as_list()
# N = tf.shape(net)[0]
# grads = _compute_gradients(cost, [net])[0]
# norm_grads = tf.divide(grads, tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5))
# weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3]) # [N, C]
# weights = tf.expand_dims(weights, 2) # [N, C, 1]
# net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
# # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True) # [N, width*height, 1]
# cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0) # [N, 1, width*height]
# cam_chan = tf.maximum(tf.multiply(net, weights), 0) # [N, C, width*height]
# cam = cam_mean*cam_chan
# # Passing through ReLU
# cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True)
# cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width])
# else:
# cam = 0.
# return cam
# def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20):
# if logits == False:
# return logits
# U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1)
# y = logits - tf.log(-tf.log(U + eps) + eps)
# cam_mask = tf.nn.softmax(y / tau)
# topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32)) # [N, 1]
# topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1)
# topk = tf.expand_dims(topk, 1) # [N, C, 1]
# cam_mask = (cam_mask < topk)
# # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32)
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1]
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1, 1]
# return cam_mask
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, C = net.get_shape().as_list()
else:
_, C, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
N = tf.shape(net)[0]
dropblock_size = min(dropblock_size, width)
    # seed_drop_rate is the gamma parameter of DropBlock.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
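    # i.e. (1 - keep_prob) / dropblock_size**2, rescaled by the ratio of the feature-map
    # area to the area of valid block centers, as in the DropBlock paper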
cam_mask = _get_cam(net, label, flag, dropblock_size, data_format)
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast((1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.logical_or(block_pattern, cam_mask)
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
return net
| 45.891304 | 166 | 0.663667 |
0a44f5460d17d97fc0728fbb786ff7e11153576a
| 2,374 |
py
|
Python
|
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
|
Purple-PI/rlstructures
|
9b201b083715bbda2f3534b010c84e11dfc0a1c7
|
[
"MIT"
] | 281 |
2021-01-13T14:20:23.000Z
|
2022-03-23T08:46:56.000Z
|
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
|
Purple-PI/rlstructures
|
9b201b083715bbda2f3534b010c84e11dfc0a1c7
|
[
"MIT"
] | 2 |
2021-01-22T23:28:34.000Z
|
2021-04-29T22:05:42.000Z
|
tutorial/deprecated/tutorial_recurrent_policy/main_a2c.py
|
Purple-PI/rlstructures
|
9b201b083715bbda2f3534b010c84e11dfc0a1c7
|
[
"MIT"
] | 13 |
2021-01-15T14:53:32.000Z
|
2022-03-22T11:12:54.000Z
|
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures import logging
from rlstructures.env_wrappers import GymEnv, GymEnvInf
from rlstructures.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_recurrent_policy.agent import RecurrentAgent
from tutorial.tutorial_recurrent_policy.a2c import A2C
import gym
from gym.wrappers import TimeLimit
# We write the 'create_env' and 'create_agent' function in the main file to allow these functions to be used with pickle when creating the batcher processes
if __name__ == "__main__":
# We use spawn mode such that most of the environment will run in multiple processes
import torch.multiprocessing as mp
mp.set_start_method("spawn")
config = {
"env_name": "CartPole-v0",
"a2c_timesteps": 3,
"n_envs": 4,
"max_episode_steps": 100,
"env_seed": 42,
"n_threads": 4,
"n_evaluation_threads": 2,
"n_evaluation_episodes": 256,
"time_limit": 3600,
"lr": 0.001,
"discount_factor": 0.95,
"critic_coef": 1.0,
"entropy_coef": 0.01,
"a2c_coef": 1.0,
"logdir": "./results",
}
exp = Experiment(config, create_env, create_train_env, create_agent)
exp.run()
| 29.675 | 156 | 0.700505 |
0a458f21d72d88857440d86e340b226a075998cd
| 19,980 |
py
|
Python
|
dashboard/gnd-app.py
|
buchmuseum/GND_Dashboard
|
c8c039bc8c09c480fc5ab8a0b186cd9dc37d7423
|
[
"CC0-1.0"
] | 5 |
2021-01-21T17:54:23.000Z
|
2021-08-09T07:34:10.000Z
|
dashboard/gnd-app.py
|
buchmuseum/GND_Dashboard
|
c8c039bc8c09c480fc5ab8a0b186cd9dc37d7423
|
[
"CC0-1.0"
] | 2 |
2021-07-27T13:38:06.000Z
|
2021-08-05T16:01:19.000Z
|
dashboard/gnd-app.py
|
buchmuseum/GND_Dashboard
|
c8c039bc8c09c480fc5ab8a0b186cd9dc37d7423
|
[
"CC0-1.0"
] | 2 |
2021-03-02T12:48:14.000Z
|
2021-07-17T08:48:48.000Z
|
from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
#main
st.title('GND-Dashboard')
#infobereich oben
with st.beta_container():
    st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statistiken. Verwenden Sie einen auf Chromium basierenden Browser.')
with st.beta_expander("Methodik und Datenherkunft"):
st.markdown('''
    Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normdatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.
    Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).
    Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.
    Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.
Die Daten werden monatlich aktualisiert.
''')
#sidebar mit satzartenfilter
st.sidebar.header("Satzart whlen")
satzart = st.sidebar.selectbox(
"ber welche GND-Satzart mchten Sie etwas erfahren?",
('alle', "Tp - Personen", "Tb - Krperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie Christian Baumann geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')
gnd_allgemein = st.beta_container()
with gnd_allgemein:
st.header('GND Statistik allgemein')
    #allgemeine statistiken in abhängigkeit der satzart
if satzart == 'alle':
gesamt_entity_count()
entities()
newcomer()
zeitverlauf()
relationen()
systematik()
else:
entities()
newcomer()
    #besondere widgets für einzelne satzarten
if satzart == "Tp - Personen":
wirkungsorte()
elif satzart == "Tg - Geografika":
wirkungsorte_musik()
wirkungsorte()
elif satzart == "Ts - Sachbegriffe":
sachbegriff_cloud()
systematik_ts()
dnb = st.beta_container()
with dnb:
st.header('GND in der Deutschen Nationalbibliothek')
gnd_top()
dnb_links()
streamlit_analytics.stop_tracking()
| 50.71066 | 479 | 0.682382 |
0a47a03d932bb0b243e96234078c92e60fb74516
| 891 |
py
|
Python
|
seamo/support/seamo_exceptions.py
|
amandalynne/Seattle-Mobility-Index
|
f21d2fa6913ce9474aedc298e9e4a6e7c9390e64
|
[
"MIT"
] | 3 |
2018-08-20T18:34:03.000Z
|
2018-10-02T23:41:52.000Z
|
seamo/support/seamo_exceptions.py
|
amandalynne/Seattle-Mobility-Index
|
f21d2fa6913ce9474aedc298e9e4a6e7c9390e64
|
[
"MIT"
] | null | null | null |
seamo/support/seamo_exceptions.py
|
amandalynne/Seattle-Mobility-Index
|
f21d2fa6913ce9474aedc298e9e4a6e7c9390e64
|
[
"MIT"
] | 1 |
2018-10-02T23:42:24.000Z
|
2018-10-02T23:42:24.000Z
|
"""
Class for all excpetions used in following scripts
- geocoder.py
- geocoder_input.py
"""
| 27 | 81 | 0.740741 |
0a4800ab1d62e509adf8e4628718cf0758bb7bb5
| 3,189 |
py
|
Python
|
vize/150401052/sunucu.py
|
hasan-se/blm304
|
893d15282497a426ff96b0c8b6c77d57c406742e
|
[
"Unlicense"
] | 1 |
2021-05-04T21:46:08.000Z
|
2021-05-04T21:46:08.000Z
|
vize/150401052/sunucu.py
|
hasan-se/blm304
|
893d15282497a426ff96b0c8b6c77d57c406742e
|
[
"Unlicense"
] | null | null | null |
vize/150401052/sunucu.py
|
hasan-se/blm304
|
893d15282497a426ff96b0c8b6c77d57c406742e
|
[
"Unlicense"
] | null | null | null |
#Erdin Alhas 150401052
import os
import sys
import time
from socket import *
from os import system, name
ip = '127.0.0.1'
port = 42
s_soket = socket(AF_INET, SOCK_DGRAM)
s_soket.bind((ip, port))
print("\nSunucu Hazir\n")
kontrol, istemciAdres = s_soket.recvfrom(4096)
s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres)
i, istemciAdres = s_soket.recvfrom(4096)
if(i.decode("utf-8") == "listeleme yap"):
dosyalar = "\n".join(os.listdir())
s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres)
sys.exit()
elif(i.decode("utf-8") == "put yap"):
cevap = s_soket.recvfrom(4096)
if(cevap[0].decode("utf-8") == "mevcut"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
dosyaIcerigi = s_soket.recvfrom(4096)
if(os.path.exists(dosyaIsmi.decode("utf-8")) == True):
s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres)
karar = s_soket.recvfrom(4096)
if(karar[0].decode("utf-8") == "1"):
yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt"
dosyaYeni = open(yeniAd, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
print("\nPUT islemi basariyla gerceklesti..")
else:
dosyaYeni = open(dosyaIsmi, "wb")
dosyaYeni.write(dosyaIcerigi[0])
dosyaYeni.close()
s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres)
print("\nPUT islemi basariyla gerceklesti..")
else:
print("\nGirilen adda bir dosya istemcide bulunamadi..")
elif(i.decode("utf-8") == "get yap"):
dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
if (os.path.exists(dosyaIsmi.decode("utf-8")) == True):
dosya = open(dosyaIsmi.decode("utf-8"), "rb")
s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres)
dosyaIcerik = dosya.read()
dosya.close()
s_soket.sendto(dosyaIcerik, istemciAdres)
kontrol = s_soket.recvfrom(4096)
print("\nGET islemi basariyla gerceklesti..")
sys.exit()
else:
print("\n! Bu isimde bir dosya sunucuda mevcut deil")
sys.exit()
elif(i.decode("utf-8") == "bitir"):
s_soket.close()
print("\nSunucu kapandi")
sys.exit()
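# Illustrative only: a minimal client-side sketch of the same UDP handshake (the matching
# client script is a separate file; the exact message contents here are assumptions).
# c = socket(AF_INET, SOCK_DGRAM)
# c.sendto(bytes("hazir", encoding='utf-8'), (ip, port))  # any first datagram triggers the greeting
# print(c.recvfrom(4096)[0].decode("utf-8"))  # -> "Sunucu hazir"
# c.sendto(bytes("listeleme yap", encoding='utf-8'), (ip, port))
# print(c.recvfrom(4096)[0].decode("utf-8"))  # -> directory listing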
| 54.050847 | 107 | 0.444967 |
0a482fa1649b42a4ec4a6b713bc6b758170e2273
| 12,096 |
py
|
Python
|
httprunner/compat.py
|
panyuan209/httprunner
|
d90f2b9ab06963e8efa1c327975fca5296d6bc39
|
[
"Apache-2.0"
] | null | null | null |
httprunner/compat.py
|
panyuan209/httprunner
|
d90f2b9ab06963e8efa1c327975fca5296d6bc39
|
[
"Apache-2.0"
] | null | null | null |
httprunner/compat.py
|
panyuan209/httprunner
|
d90f2b9ab06963e8efa1c327975fca5296d6bc39
|
[
"Apache-2.0"
] | null | null | null |
"""
This module handles compatibility issues between testcase format v2 and v3.
"""
import os
import sys
from typing import List, Dict, Text, Union, Any
from loguru import logger
from httprunner import exceptions
from httprunner.loader import load_project_meta, convert_relative_project_root_dir
from httprunner.parser import parse_data
from httprunner.utils import sort_dict_by_custom_order
def _convert_extractors(extractors: Union[List, Dict]) -> Dict:
""" convert extract list(v2) to dict(v3)
Args:
extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}]
Returns:
{"varA": "body.varA", "varB": "body.varB"}
"""
v3_extractors: Dict = {}
if isinstance(extractors, List):
# [{"varA": "content.varA"}, {"varB": "json.varB"}]
for extractor in extractors:
if not isinstance(extractor, Dict):
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in extractor.items():
v3_extractors[k] = v
elif isinstance(extractors, Dict):
# {"varA": "body.varA", "varB": "body.varB"}
v3_extractors = extractors
else:
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in v3_extractors.items():
v3_extractors[k] = _convert_jmespath(v)
return v3_extractors
def ensure_cli_args(args: List) -> List:
""" ensure compatibility with deprecated cli args in v2
"""
# remove deprecated --failfast
if "--failfast" in args:
logger.warning(f"remove deprecated argument: --failfast")
args.pop(args.index("--failfast"))
# convert --report-file to --html
if "--report-file" in args:
logger.warning(f"replace deprecated argument --report-file with --html")
index = args.index("--report-file")
args[index] = "--html"
args.append("--self-contained-html")
# keep compatibility with --save-tests in v2
if "--save-tests" in args:
logger.warning(
f"generate conftest.py keep compatibility with --save-tests in v2"
)
args.pop(args.index("--save-tests"))
_generate_conftest_for_summary(args)
return args
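# Illustrative example (not from the original module):
#   ensure_cli_args(["--failfast", "--report-file", "report.html"])
#   => ["--html", "report.html", "--self-contained-html"]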
def ensure_path_sep(path: Text) -> Text:
""" ensure compatibility with different path separators of Linux and Windows
"""
if "/" in path:
path = os.sep.join(path.split("/"))
if "\\" in path:
path = os.sep.join(path.split("\\"))
return path
| 30.315789 | 112 | 0.61789 |
0a49026066247a3018418704bbd8ff4e56b70f26
| 2,888 |
py
|
Python
|
examples/demo/basic/scatter.py
|
ContinuumIO/chaco
|
e4a42b91cb25ef7191fd465caaef2c3256fc668e
|
[
"BSD-3-Clause"
] | 3 |
2017-09-17T17:32:06.000Z
|
2022-03-15T13:04:43.000Z
|
examples/demo/basic/scatter.py
|
ContinuumIO/chaco
|
e4a42b91cb25ef7191fd465caaef2c3256fc668e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/demo/basic/scatter.py
|
ContinuumIO/chaco
|
e4a42b91cb25ef7191fd465caaef2c3256fc668e
|
[
"BSD-3-Clause"
] | 5 |
2015-05-17T16:08:11.000Z
|
2021-02-23T09:23:42.000Z
|
"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
| 29.773196 | 80 | 0.541205 |
0a4908d9ecd7f27856ad9555eafa94debe8ca0ea
| 1,347 |
py
|
Python
|
webstr/core/config.py
|
fbalak/webstr
|
7c7e552fb9943bf664b94ca75a88747c0b243722
|
[
"Apache-2.0"
] | 3 |
2017-03-01T11:51:12.000Z
|
2018-04-16T13:09:56.000Z
|
webstr/core/config.py
|
fbalak/webstr
|
7c7e552fb9943bf664b94ca75a88747c0b243722
|
[
"Apache-2.0"
] | null | null | null |
webstr/core/config.py
|
fbalak/webstr
|
7c7e552fb9943bf664b94ca75a88747c0b243722
|
[
"Apache-2.0"
] | 1 |
2018-04-16T13:09:34.000Z
|
2018-04-16T13:09:34.000Z
|
"""
Central configuration module of webstr selenium tests.
This module provides configuration options along with default values and
a function to redefine values.
"""
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
SELENIUM_LOG_LEVEL = logging.INFO
SCHEME = 'https'
PORT = 443
BROWSER = 'Firefox'
BROWSER_VERSION = ''
BROWSER_PLATFORM = 'ANY'
SELENIUM_SERVER = None
SELENIUM_PORT = 4444
BROWSER_WIDTH = 1280
BROWSER_HEIGHT = 1024
def update_value(key_name, value, force=False):
"""
Update single value of this config module.
"""
this_module = sys.modules[__name__]
key_name = key_name.upper()
# raise AttributeError if we try to define new value (unless force is used)
if not force:
getattr(this_module, key_name)
setattr(this_module, key_name, value)
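if __name__ == '__main__':
    # Illustrative usage only (not part of the original module): redefine two defaults.
    update_value('BROWSER', 'Chrome')
    update_value('browser_height', 768)
    print(BROWSER, BROWSER_HEIGHT)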
| 27.489796 | 79 | 0.746845 |
0a4957ae4c91cc14cfa8216c87afffecedc2a26e
| 641 |
py
|
Python
|
operations/mutations/mutation.py
|
PiotrBosowski/feat-genes
|
8e6604fd4e121022f8ac988d9b56985de01b8331
|
[
"MIT"
] | null | null | null |
operations/mutations/mutation.py
|
PiotrBosowski/feat-genes
|
8e6604fd4e121022f8ac988d9b56985de01b8331
|
[
"MIT"
] | null | null | null |
operations/mutations/mutation.py
|
PiotrBosowski/feat-genes
|
8e6604fd4e121022f8ac988d9b56985de01b8331
|
[
"MIT"
] | null | null | null |
import random
| 32.05 | 59 | 0.620905 |
0a498f8f754b453bd4fdad3c6f6282e67b1ff4ac
| 1,551 |
py
|
Python
|
examples/CountLettersInList.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | null | null | null |
examples/CountLettersInList.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 4 |
2019-11-07T12:32:19.000Z
|
2020-07-19T14:04:44.000Z
|
examples/CountLettersInList.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 5 |
2019-12-04T15:56:55.000Z
|
2022-01-14T06:19:18.000Z
|
import RandomCharacter # Defined in Listing 6.9
def main():
"""Main."""
# Create a list of characters
chars = createList()
# Display the list
print("The lowercase letters are:")
displayList(chars)
# Count the occurrences of each letter
counts = countLetters(chars)
# Display counts
print("The occurrences of each letter are:")
displayCounts(counts)
def createList():
"""Create a list of characters."""
# Create an empty list
chars = []
# Create lowercase letters randomly and add them to the list
for i in range(100):
chars.append(RandomCharacter.getRandomLowerCaseLetter())
# Return the list
return chars
def displayList(chars):
"""Display the list of characters."""
# Display the characters in the list 20 on each line
for i in range(len(chars)):
if (i + 1) % 20 == 0:
print(chars[i])
else:
print(chars[i], end=' ')
def countLetters(chars):
"""Count the occurrences of each letter."""
# Create a list of 26 integers with initial value 0
counts = 26 * [0]
# For each lowercase letter in the list, count it
for i in range(len(chars)):
counts[ord(chars[i]) - ord('a')] += 1
return counts
def displayCounts(counts):
"""Display counts."""
for i in range(len(counts)):
if (i + 1) % 10 == 0:
print(counts[i], chr(i + ord('a')))
else:
print(counts[i], chr(i + ord('a')), end=' ')
print()
main() # Call the main function
| 23.149254 | 64 | 0.597679 |
0a49a5caf98b95481017eff59d5dcd3666b0a6ef
| 1,318 |
py
|
Python
|
ddtrace/contrib/vertica/__init__.py
|
lightstep/dd-trace-py
|
9108cbf54ff31f803eac735507ae6d2a87b9b45f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5 |
2020-03-07T01:12:29.000Z
|
2021-04-21T00:53:19.000Z
|
ddtrace/contrib/vertica/__init__.py
|
lightstep/dd-trace-py
|
9108cbf54ff31f803eac735507ae6d2a87b9b45f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 4 |
2019-11-22T20:58:01.000Z
|
2020-08-17T21:16:13.000Z
|
ddtrace/contrib/vertica/__init__.py
|
lightstep/dd-trace-py
|
9108cbf54ff31f803eac735507ae6d2a87b9b45f
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3 |
2020-03-18T16:29:20.000Z
|
2020-07-20T16:05:10.000Z
|
"""
The Vertica integration will trace queries made using the vertica-python
library.
Vertica will be automatically instrumented with ``patch_all``, or when using
the ``ls-trace-run`` command.
Vertica is instrumented on import. To instrument Vertica manually use the
``patch`` function. Note the ordering of the following statements::
from ddtrace import patch
patch(vertica=True)
import vertica_python
# use vertica_python like usual
To configure the Vertica integration globally you can use the ``Config`` API::
from ddtrace import config, patch
patch(vertica=True)
config.vertica['service_name'] = 'my-vertica-database'
To configure the Vertica integration on an instance-per-instance basis use the
``Pin`` API::
from ddtrace import Pin, patch, Tracer
patch(vertica=True)
import vertica_python
custom_tracer = Tracer()
conn = vertica_python.connect(**YOUR_VERTICA_CONFIG)
# override the service and tracer to be used
Pin.override(conn, service='myverticaservice', tracer=custom_tracer)
"""
from ...utils.importlib import require_modules
required_modules = ['vertica_python']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch, unpatch
        __all__ = ['patch', 'unpatch']
| 25.346154 | 78 | 0.741275 |
0a49e1637a3ffcd5ae7b64809f0205d8b48bfcf6
| 627 |
py
|
Python
|
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079 |
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623 |
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/python-openid-2.2.5/openid/test/test_htmldiscover.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033 |
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from openid.consumer.discover import OpenIDServiceEndpoint
import datadriven
| 28.5 | 80 | 0.660287 |
0a4ab6a6c7a8f22ae4262d99f43041e035e6b535
| 602 |
py
|
Python
|
project/settings/production.py
|
chiehtu/kissaten
|
a7aad01de569107d5fd5ed2cd781bca6e5750871
|
[
"MIT"
] | null | null | null |
project/settings/production.py
|
chiehtu/kissaten
|
a7aad01de569107d5fd5ed2cd781bca6e5750871
|
[
"MIT"
] | null | null | null |
project/settings/production.py
|
chiehtu/kissaten
|
a7aad01de569107d5fd5ed2cd781bca6e5750871
|
[
"MIT"
] | null | null | null |
from .base import *
SECRET_KEY = get_env_var('SECRET_KEY')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''
USERENA_USE_HTTPS = True
| 18.8125 | 61 | 0.750831 |
0a4b453e9f68bd48c8b434b43c7c61e7c47c248d
| 3,400 |
py
|
Python
|
modelflow/graph_viz_from_outputs.py
|
ModelFlow/modelflow
|
c2b720b2da8bb17462baff5c00bbe942644474b0
|
[
"MIT"
] | 6 |
2020-07-28T19:58:28.000Z
|
2021-05-01T18:51:37.000Z
|
modelflow/graph_viz_from_outputs.py
|
ModelFlow/modelflow
|
c2b720b2da8bb17462baff5c00bbe942644474b0
|
[
"MIT"
] | 81 |
2020-07-30T07:08:10.000Z
|
2021-07-28T02:17:43.000Z
|
modelflow/graph_viz_from_outputs.py
|
ModelFlow/modelflow
|
c2b720b2da8bb17462baff5c00bbe942644474b0
|
[
"MIT"
] | null | null | null |
import pandas as pd
import argparse
import json
try:
from graphviz import Digraph
except ImportError:
print("Note: Optional graphviz not installed")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Graph Viz')
parser.add_argument('-f', '--output_file', type=str,
help='The output file to generate a graph of', required=True)
args = parser.parse_args()
main(args)
| 32.380952 | 95 | 0.577941 |
0a4c02948fcf1ba6f4a5b3cae666b7bb9cd4c29a
| 4,508 |
py
|
Python
|
src/command/voice_log/chart.py
|
link1345/Vol-GameClanTools-DiscordBot
|
c3349f38d59cba59161b8c54c172e39ba873c53d
|
[
"MIT"
] | null | null | null |
src/command/voice_log/chart.py
|
link1345/Vol-GameClanTools-DiscordBot
|
c3349f38d59cba59161b8c54c172e39ba873c53d
|
[
"MIT"
] | 25 |
2021-08-11T13:02:18.000Z
|
2021-08-20T23:24:19.000Z
|
src/command/voice_log/chart.py
|
link1345/Vol-GameClanTools-DiscordBot
|
c3349f38d59cba59161b8c54c172e39ba873c53d
|
[
"MIT"
] | null | null | null |
import discord
import os
import json
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from pprint import pprint
import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
| 23.479167 | 154 | 0.675244 |
0a4c9bc797a43e5add896c8bde8af43dfb42905c
| 23,511 |
py
|
Python
|
python/src/vmaf/core/feature_extractor.py
|
jayholman/vmaf
|
0bba4faf68ab89e38314cc596e6908b4fb83984d
|
[
"Apache-2.0"
] | 40 |
2019-07-04T06:22:10.000Z
|
2022-03-10T14:49:33.000Z
|
python/src/vmaf/core/feature_extractor.py
|
jayholman/vmaf
|
0bba4faf68ab89e38314cc596e6908b4fb83984d
|
[
"Apache-2.0"
] | null | null | null |
python/src/vmaf/core/feature_extractor.py
|
jayholman/vmaf
|
0bba4faf68ab89e38314cc596e6908b4fb83984d
|
[
"Apache-2.0"
] | 8 |
2019-08-20T08:14:41.000Z
|
2021-11-18T07:01:19.000Z
|
from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
| 44.02809 | 146 | 0.663562 |
0a4d54d89c32a47c57e2c8a928a39b69e030c881
| 35 |
py
|
Python
|
notebooks/_solutions/pandas_02_basic_operations28.py
|
rprops/Python_DS-WS
|
b2fc449a74be0c82863e5fcf1ddbe7d64976d530
|
[
"BSD-3-Clause"
] | 65 |
2017-03-21T09:15:40.000Z
|
2022-02-01T23:43:08.000Z
|
notebooks/_solutions/pandas_02_basic_operations28.py
|
rprops/Python_DS-WS
|
b2fc449a74be0c82863e5fcf1ddbe7d64976d530
|
[
"BSD-3-Clause"
] | 100 |
2016-12-15T03:44:06.000Z
|
2022-03-07T08:14:07.000Z
|
notebooks/_solutions/pandas_02_basic_operations28.py
|
rprops/Python_DS-WS
|
b2fc449a74be0c82863e5fcf1ddbe7d64976d530
|
[
"BSD-3-Clause"
] | 52 |
2016-12-19T07:48:52.000Z
|
2022-02-19T17:53:48.000Z
|
df['Age'].hist() #bins=30, log=True
| 35 | 35 | 0.628571 |
0a4ed29474e7c8d2e3be0b36b2cae77e32eb65c8
| 376 |
py
|
Python
|
controller/base_service.py
|
oopsteams/pansite
|
11896842da66efc72c26eab071f7f802b982f435
|
[
"MIT"
] | null | null | null |
controller/base_service.py
|
oopsteams/pansite
|
11896842da66efc72c26eab071f7f802b982f435
|
[
"MIT"
] | 1 |
2021-06-02T01:00:41.000Z
|
2021-06-02T01:00:41.000Z
|
controller/base_service.py
|
oopsteams/pansite
|
11896842da66efc72c26eab071f7f802b982f435
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created by susy at 2019/11/8
"""
from dao.dao import DataDao
import pytz
from dao.models import PanAccounts
from cfg import PAN_SERVICE, MASTER_ACCOUNT_ID
| 23.5 | 88 | 0.726064 |
0a4f114d5336abdf79c1eeb8751aaf58a158b4d8
| 1,382 |
py
|
Python
|
transformerquant/modules/attention/multi_head.py
|
StateOfTheArt-quant/transformerquant
|
f6775d7aa920b84908b0a09d9ba098b1fe87bdff
|
[
"Apache-2.0"
] | 22 |
2019-11-02T12:00:38.000Z
|
2022-02-16T08:00:36.000Z
|
transformerquant/modules/attention/multi_head.py
|
StateOfTheArt-quant/transformerquant
|
f6775d7aa920b84908b0a09d9ba098b1fe87bdff
|
[
"Apache-2.0"
] | null | null | null |
transformerquant/modules/attention/multi_head.py
|
StateOfTheArt-quant/transformerquant
|
f6775d7aa920b84908b0a09d9ba098b1fe87bdff
|
[
"Apache-2.0"
] | 6 |
2020-04-19T08:10:03.000Z
|
2021-12-07T05:59:46.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch.nn as nn
from .single import attention
| 35.435897 | 92 | 0.595514 |
0a50b1cbdb77a8f4dc63235c790b37c0f8c9b3d2
| 5,658 |
py
|
Python
|
avatar/generalization.py
|
Julian-Theis/AVATAR
|
24fcd6eaa26f413be528a160d865d5d7e49a780b
|
[
"MIT"
] | 7 |
2020-12-22T12:09:14.000Z
|
2022-03-29T12:50:35.000Z
|
avatar/generalization.py
|
ProminentLab/AVATAR
|
a20c767d8739a52f538927b4ec3d528952263d5a
|
[
"MIT"
] | 10 |
2020-11-13T17:45:59.000Z
|
2022-02-10T00:50:38.000Z
|
avatar/generalization.py
|
ProminentLab/AVATAR
|
a20c767d8739a52f538927b4ec3d528952263d5a
|
[
"MIT"
] | 2 |
2020-03-26T22:27:27.000Z
|
2020-07-07T22:36:41.000Z
|
import os, time, argparse
from datetime import datetime
from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.log.exporter.xes import factory as xes_exporter
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.petri.importer import pnml as pnml_importer
from pm4py.evaluation.replay_fitness import factory as replay_factory
from pm4py.evaluation.precision import factory as precision_factory
from conf.settings import DATA_PATH
WORK_PATH = os.path.abspath(os.getcwd())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--system', help='Which system (e.g. pb_system_5_3)', required=True)
parser.add_argument('-sfx', '--suffix', help='Suffix (chosen epoch, e.g. 1981)', required=True)
parser.add_argument('-j', '--job', help='Job (0/1)', required=True)
parser.add_argument('-pn', '--pn', help='Petri net file to evaluate', required=True)
parser.add_argument('-strategy', '--strategy', help='naive/mh', required=True)
args = parser.parse_args()
system = args.system
suffix = int(args.suffix)
job = args.job
pn = args.pn
strategy = args.strategy
if DATA_PATH is None:
train_file = os.path.join(WORK_PATH, "data", "variants", system + "_train.txt")
gen_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(WORK_PATH, "data", "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(WORK_PATH, "data", "pns", system, pn)
else:
train_file = os.path.join(DATA_PATH, "variants", system + "_train.txt")
gen_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
csv_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
xes_file = os.path.join(DATA_PATH, "avatar", "variants", system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
pn_file = os.path.join(DATA_PATH, "pns", system, pn)
""" READ FILES AND CONVERT TO XES """
traces = readFile(train_file,gen_file, unique=True)
convertToCsv(traces=traces, to_path=csv_file)
time.sleep(1)
log = csv_importer.import_event_log(csv_file)
xes_exporter.export_log(log, xes_file)
time.sleep(1)
""" PERFORM MEASUREMENT ON PN AND XES"""
log = xes_importer.import_log(xes_file)
net, initial_marking, final_marking = pnml_importer.import_net(pn_file)
fitness = replay_factory.apply(log, net, initial_marking, final_marking)
print("Fitness=", fitness)
precision = precision_factory.apply(log, net, initial_marking, final_marking)
print("Precision=", precision)
fitness = fitness["log_fitness"]
generalization = 2 * ((fitness * precision) / (fitness + precision))
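    # i.e. the harmonic mean (F1-style combination) of replay fitness and precision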
if strategy == "mh":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using MH SAMPLING on suffix ", str(suffix)," ***")
elif strategy == "naive":
print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using NAIVE SAMPLING on suffix ", str(suffix), " ***")
else:
raise ValueError("Unknown strategy.")
print("AVATAR Generalization=", generalization)
| 39.84507 | 166 | 0.61824 |
0a515a3d5abf09db1a4745bebd807a1a69030c04
| 219 |
py
|
Python
|
Introductions/The Rust Programming Language/embed/bindings/embed.py
|
uqtimes/Rust-SampleCodes
|
f9d7a040d8198acd30bf3423e7c6cf52bc9c7b6e
|
[
"MIT"
] | null | null | null |
Introductions/The Rust Programming Language/embed/bindings/embed.py
|
uqtimes/Rust-SampleCodes
|
f9d7a040d8198acd30bf3423e7c6cf52bc9c7b6e
|
[
"MIT"
] | null | null | null |
Introductions/The Rust Programming Language/embed/bindings/embed.py
|
uqtimes/Rust-SampleCodes
|
f9d7a040d8198acd30bf3423e7c6cf52bc9c7b6e
|
[
"MIT"
] | null | null | null |
# $ python embed.py
from ctypes import cdll
lib = cdll.LoadLibrary("../target/release/libembed.dylib") #=> for Mac
#lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux
lib.process()
print("done!")
| 19.909091 | 70 | 0.689498 |
0a51f623e8f7d8887b5aa54af4a94e17cde8759e
| 6,960 |
py
|
Python
|
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py
|
handsome-baby/huaweicloud-sdk-python-v3
|
6cdcf1da8b098427e58fc3335a387c14df7776d0
|
[
"Apache-2.0"
] | 1 |
2021-04-16T07:59:28.000Z
|
2021-04-16T07:59:28.000Z
|
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-image/huaweicloudsdkimage/v1/image_client.py
|
Lencof/huaweicloud-sdk-python-v3
|
d13dc4e2830a83e295be6e4de021999b3376e34e
|
[
"Apache-2.0"
] | 1 |
2022-01-17T02:24:18.000Z
|
2022-01-17T02:24:18.000Z
|
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
| 32.985782 | 135 | 0.653305 |
0a52fbe0941050c6bef7a51be53e3c24aa5d63bd
| 18,295 |
py
|
Python
|
frank_wolfe.py
|
ebezzam/PolyatomicFW_SPL
|
7fbbead5a642915c4bb4d061006b7dac8f6af788
|
[
"MIT"
] | null | null | null |
frank_wolfe.py
|
ebezzam/PolyatomicFW_SPL
|
7fbbead5a642915c4bb4d061006b7dac8f6af788
|
[
"MIT"
] | null | null | null |
frank_wolfe.py
|
ebezzam/PolyatomicFW_SPL
|
7fbbead5a642915c4bb4d061006b7dac8f6af788
|
[
"MIT"
] | 1 |
2022-02-23T07:18:03.000Z
|
2022-02-23T07:18:03.000Z
|
import numpy as np
from typing import Optional, Any
from pandas import DataFrame
from copy import deepcopy
from abc import abstractmethod
from utils import TimedGenericIterativeAlgorithm
import pycsou.core as pcore
import pycsou.linop as pl
from pycsou.func.penalty import L1Norm
from pycsou.func.loss import SquaredL2Loss
from pycsou.opt.proxalgs import APGD
| 54.287834 | 135 | 0.574857 |
0a5476d41706f1feaac0aa6254b09258ce332025
| 1,700 |
py
|
Python
|
lib/rdflib-3.1.0/test/test_trix_serialize.py
|
suzuken/xbrlparser
|
d9309081b8d21113ebb7a0983c677bee971af0a1
|
[
"MIT"
] | 3 |
2015-01-12T16:16:50.000Z
|
2020-03-20T03:22:36.000Z
|
lib/rdflib-3.1.0/test/test_trix_serialize.py
|
suzuken/xbrlparser
|
d9309081b8d21113ebb7a0983c677bee971af0a1
|
[
"MIT"
] | null | null | null |
lib/rdflib-3.1.0/test/test_trix_serialize.py
|
suzuken/xbrlparser
|
d9309081b8d21113ebb7a0983c677bee971af0a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
if __name__=='__main__':
unittest.main()
| 26.984127 | 72 | 0.597059 |
0a554fb894afeaf01a54f7e6b34139ca26334475
| 862 |
py
|
Python
|
dbschema/revertDBinstall.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
dbschema/revertDBinstall.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
dbschema/revertDBinstall.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 1 |
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
#!/usr/bin/env python
from sinedon import dbupgrade, dbconfig
import updatelib
project_dbupgrade = dbupgrade.DBUpgradeTools('projectdata', drop=True)
if __name__ == "__main__":
updatelib_inst = updatelib.UpdateLib(project_dbupgrade)
checkout_version = raw_input('Revert to checkout version, for example, 2.1 -->')
if checkout_version != 'trunk':
try:
map((lambda x:int(x)),checkout_version.split('.')[:2])
except:
print "valid versions are 'trunk', '2.1', or '2.1.2' etc"
raise
checkout_revision = int(raw_input('Revert to checkout revision, for example, 16500 -->'))
updatelib_inst.updateDatabaseVersion(checkout_version)
print "\033[35mVersion Updated in the database %s\033[0m" % checkout_version
updatelib_inst.updateDatabaseRevision(checkout_revision)
print "\033[35mRevision Updated in the database as %d\033[0m" % checkout_revision
| 41.047619 | 90 | 0.759861 |
0a56848b910320fe7cdd13bea4f9b4579072e4c7
| 724 |
py
|
Python
|
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
|
guy4261/fight-churn
|
f3820edd6d4af5e0bd625434d3ad4236aa781ef4
|
[
"MIT"
] | 151 |
2019-04-26T19:05:14.000Z
|
2022-03-28T10:11:53.000Z
|
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
|
guy4261/fight-churn
|
f3820edd6d4af5e0bd625434d3ad4236aa781ef4
|
[
"MIT"
] | 15 |
2019-08-05T06:35:00.000Z
|
2022-03-31T02:58:30.000Z
|
fightchurn/listings/chap9/listing_9_4_regression_cparam.py
|
guy4261/fight-churn
|
f3820edd6d4af5e0bd625434d3ad4236aa781ef4
|
[
"MIT"
] | 71 |
2019-06-07T17:50:04.000Z
|
2022-03-27T02:49:24.000Z
|
from sklearn.linear_model import LogisticRegression
from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
| 51.714286 | 119 | 0.825967 |
0a56c8065ff434f391ba424536df2984e5ef9221
| 3,396 |
py
|
Python
|
notebooks/classical_clustering.py
|
prise6/smart-iss-posts
|
fc913078e7fbe6343fd36ec6ca9852322247da5d
|
[
"MIT"
] | null | null | null |
notebooks/classical_clustering.py
|
prise6/smart-iss-posts
|
fc913078e7fbe6343fd36ec6ca9852322247da5d
|
[
"MIT"
] | 10 |
2020-01-28T23:15:20.000Z
|
2022-03-12T00:12:31.000Z
|
notebooks/classical_clustering.py
|
prise6/smart-iss-posts
|
fc913078e7fbe6343fd36ec6ca9852322247da5d
|
[
"MIT"
] | null | null | null |
#%% [markdown]
# # Clustering classique
#%% [markdown]
# ## import classique
import os
#%%
%load_ext autoreload
%autoreload 2
os.chdir('/home/jovyan/work')
#%% [markdown]
# ## Import iss
#%%
from iss.tools import Config
from iss.tools import Tools
from iss.models import SimpleConvAutoEncoder
from iss.clustering import ClassicalClustering
from iss.clustering import AdvancedClustering
from dotenv import find_dotenv, load_dotenv
import numpy as np
import matplotlib.pyplot as plt
#%% [markdown]
### Chargement de la config
#%%
load_dotenv(find_dotenv())
cfg = Config(project_dir = os.getenv("PROJECT_DIR"), mode = os.getenv("MODE"))
#%% [markdown]
### Chargement du modèle
#%%
## charger le modèle
model_type = 'simple_conv'
cfg.get('models')[model_type]['model_name'] = 'model_colab'
model = SimpleConvAutoEncoder(cfg.get('models')[model_type])
#%% [markdown]
## Chargement des images
#%%
filenames = Tools.list_directory_filenames('data/processed/models/autoencoder/train/k/')
generator_imgs = Tools.generator_np_picture_from_filenames(filenames, target_size = (27, 48), batch = 496, nb_batch = 10)
#%%
pictures_id, pictures_preds = Tools.encoded_pictures_from_generator(generator_imgs, model)
#%%
intermediate_output = pictures_preds.reshape((pictures_preds.shape[0], 3*6*16))
#%% [markdown]
# ## ACP
# Réduction de la dimension
#%%
clustering = ClassicalClustering(cfg.get('clustering')['classical'], pictures_id, intermediate_output)
#%%
clustering.compute_pca()
#%% [markdown]
# ## Kmeans
# Premiers clusters
#%%
clustering.compute_kmeans()
clustering.compute_kmeans_centers()
#%% [markdown]
# ## CAH
# Seconds clusters
#%%
clustering.compute_cah()
clustering.compute_cah_labels()
#%% [markdown]
# ## Résultats
#%% [markdown]
# ### Clusters intermédiaires
#%%
fig = plt.figure(1, figsize=(12, 7))
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.kmeans_labels)
#%% [markdown]
# ### Clusters finaux
#%%
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.final_labels)
#%% [markdown]
# ### Sauvegarde des modèles
#%%
clustering.save()
#%%
# clustering = ClassicalClustering(cfg.get('clustering')['classical'])
clustering.load()
#%% [markdown]
# ## Visualisation des clusters
#%%
#%%
from IPython.display import Image
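#%%
# NOTE: `select_cluster` is used below but its definition is not included in this file
# excerpt; the following is only a hypothetical sketch, assuming it returns the filenames
# whose final CAH label matches the requested cluster id.
def select_cluster(clustering, cluster_id):
    return [f for f, label in zip(filenames, clustering.final_labels) if label == cluster_id]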
#%%
for cl in range(0,19):
print("Cluster %s" % (cl))
res_tmp = select_cluster(clustering, cl)
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp[:100]]
# img = Tools.display_mosaic(image_array, nrow = 10)
# fig = plt.figure(1, figsize=(12, 7))
# plt.imshow(img, aspect = 'auto')
# plt.show()
#%% [markdown]
### Zoom sur le cluster 0
#%%
res_tmp = select_cluster(clustering, 1)
#%%
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp]
#%%
Tools.display_mosaic(image_array, nrow = 18)
#%%
col = [1 if l == 1 else 0 for l in clustering.kmeans_labels]
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = col)
#%%
plt.scatter(clustering.pca_reduction[np.array(col) == 1, 0], clustering.pca_reduction[np.array(col) == 1, 1])
| 22.196078 | 152 | 0.707008 |
0a56fc807619248f05b24361a88a0d2de688ca4d
| 2,156 |
py
|
Python
|
SM_28BYJ48/logger/logger.py
|
kaulketh/stepper-motor-stuff
|
ca7cc78279b378e5ad8e19f9c77b794a43d9a07e
|
[
"Unlicense"
] | null | null | null |
SM_28BYJ48/logger/logger.py
|
kaulketh/stepper-motor-stuff
|
ca7cc78279b378e5ad8e19f9c77b794a43d9a07e
|
[
"Unlicense"
] | null | null | null |
SM_28BYJ48/logger/logger.py
|
kaulketh/stepper-motor-stuff
|
ca7cc78279b378e5ad8e19f9c77b794a43d9a07e
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# created 02.02.2021, tkaulke
# Thomas Kaulke, [email protected]
# https://github.com/kaulketh
# -----------------------------------------------------------
__author__ = "Thomas Kaulke"
__email__ = "[email protected]"
import errno
import logging
import os
from logging.config import fileConfig
# runtime location
this_folder = os.path.dirname(os.path.abspath(__file__))
# define log folder related to location
log_folder = os.path.join(this_folder, '../logs')
# define ini and log files
ini_file = 'debug.ini'
info_log_file = log_folder + '/info.log'
error_log_file = log_folder + '/error.log'
# check if exists or create log folder
try:
os.makedirs(log_folder, exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs(log_folder)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(log_folder):
pass
else:
raise
# setup configuration
config_file = os.path.join(this_folder, ini_file)
fileConfig(config_file, disable_existing_loggers=True)
# create handlers
handler_info = logging.FileHandler(os.path.join(this_folder, info_log_file))
handler_error = logging.FileHandler(os.path.join(this_folder, error_log_file))
# set levels
handler_info.setLevel(logging.INFO)
handler_error.setLevel(logging.ERROR)
# create formatters and add to handlers
format_info = \
logging.Formatter('%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'%(message).180s', datefmt='%Y-%m-%d %H:%M:%S')
format_error = \
logging.Formatter(
'%(asctime)s %(levelname)s '
'[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
'[ thread: %(threadName)s ] %(message)s')
handler_info.setFormatter(format_info)
handler_error.setFormatter(format_error)
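# The handlers above are presumably attached to a named logger in the rest of the module;
# a minimal sketch of that wiring (the logger name is an assumption, not original code):
# logger = logging.getLogger('stepper_motor')
# logger.addHandler(handler_info)
# logger.addHandler(handler_error)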
if __name__ == '__main__':
pass
| 28.746667 | 78 | 0.652597 |
0a5717bafa0ff0998fa59136b620ff9a8093ee50
| 11,521 |
py
|
Python
|
tests/test_mr_uplift.py
|
Ibotta/mr_uplift
|
e1facd39a87683dfdeaf7b08336e0ce781ff87cf
|
[
"Apache-2.0"
] | 48 |
2020-04-22T16:57:55.000Z
|
2022-02-02T00:21:13.000Z
|
tests/test_mr_uplift.py
|
Ibotta/mr_uplift
|
e1facd39a87683dfdeaf7b08336e0ce781ff87cf
|
[
"Apache-2.0"
] | 6 |
2020-05-01T18:15:22.000Z
|
2022-02-21T07:26:18.000Z
|
tests/test_mr_uplift.py
|
Ibotta/mr_uplift
|
e1facd39a87683dfdeaf7b08336e0ce781ff87cf
|
[
"Apache-2.0"
] | 4 |
2020-04-25T08:41:34.000Z
|
2022-01-08T11:21:23.000Z
|
import numpy as np
import pytest
from mr_uplift.dataset.data_simulation import get_no_noise_data, get_simple_uplift_data, get_observational_uplift_data_1
from mr_uplift.mr_uplift import MRUplift, get_t_data
from mr_uplift.keras_model_functionality import prepare_data_optimized_loss
import sys
import pandas as pd
| 42.356618 | 174 | 0.63432 |
0a57479ced46772f03d9c9dc023a3217a695d37d
| 345 |
py
|
Python
|
lambdataalchemani/lambda_test.py
|
Full-Data-Alchemist/lambdata-Mani-alch
|
90dcbc091d8f9841d5a1046e64437058a4156dc5
|
[
"MIT"
] | null | null | null |
lambdataalchemani/lambda_test.py
|
Full-Data-Alchemist/lambdata-Mani-alch
|
90dcbc091d8f9841d5a1046e64437058a4156dc5
|
[
"MIT"
] | null | null | null |
lambdataalchemani/lambda_test.py
|
Full-Data-Alchemist/lambdata-Mani-alch
|
90dcbc091d8f9841d5a1046e64437058a4156dc5
|
[
"MIT"
] | null | null | null |
"""
"""
import unittest
from example_module import COLORS, increment
| 15.681818 | 44 | 0.550725 |
0a585a8c735b3266210fbee5416e533aa2feb0c6
| 8,847 |
py
|
Python
|
desktop/core/src/desktop/auth/views.py
|
bopopescu/hue-5
|
665c275d0c0570b1a4a34a293503cc72ec35695c
|
[
"Apache-2.0"
] | 1 |
2018-05-07T05:40:36.000Z
|
2018-05-07T05:40:36.000Z
|
desktop/core/src/desktop/auth/views.py
|
lockhart39/HueQualityAndIngestionApp
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
[
"Apache-2.0"
] | null | null | null |
desktop/core/src/desktop/auth/views.py
|
lockhart39/HueQualityAndIngestionApp
|
c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c
|
[
"Apache-2.0"
] | 1 |
2022-03-21T09:41:35.000Z
|
2022-03-21T09:41:35.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import oauth2 as oauth
except:
oauth = None
import cgi
import logging
import urllib
from datetime import datetime
from axes.decorators import watch_login
import django.contrib.auth.views
from django.core import urlresolvers
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth import login, get_backends, authenticate
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from desktop.auth import forms as auth_forms
from desktop.lib.django_util import render
from desktop.lib.django_util import login_notrequired
from desktop.lib.django_util import JsonResponse
from desktop.log.access import access_warn, last_access_map
from desktop.conf import LDAP, OAUTH, DEMO_ENABLED
from hadoop.fs.exceptions import WebHdfsException
from useradmin.models import get_profile
from useradmin.views import ensure_home_directory, require_change_password
LOG = logging.getLogger(__name__)
def get_current_users():
"""Return dictionary of User objects and
a dictionary of the user's IP address and last access time"""
current_users = { }
for session in Session.objects.all():
try:
uid = session.get_decoded().get(django.contrib.auth.SESSION_KEY)
except SuspiciousOperation:
# If secret_key changed, this resolution won't work.
uid = None
if uid is not None:
try:
userobj = User.objects.get(pk=uid)
current_users[userobj] = last_access_map.get(userobj.username, { })
except User.DoesNotExist:
LOG.debug("User with id=%d does not exist" % uid)
return current_users
def dt_logout(request, next_page=None):
"""Log out the user"""
username = request.user.get_username()
request.audit = {
'username': username,
'operation': 'USER_LOGOUT',
'operationText': 'Logged out user: %s' % username
}
backends = get_backends()
if backends:
for backend in backends:
if hasattr(backend, 'logout'):
response = backend.logout(request, next_page)
if response:
return response
return django.contrib.auth.views.logout(request, next_page)
def profile(request):
"""
Dumps JSON for user-profile information.
"""
return render(None, request, _profile_dict(request.user))
# OAuth is based on Twitter as example.
| 34.158301 | 126 | 0.722505 |
0a58e531ca2dae9287cb878ce3e08653ca7ffa30
| 1,451 |
py
|
Python
|
gsheetsdb/url.py
|
tim-werner/gsheets-db-api
|
12f2a4fbe1bd5aa36781226759326ce782b08a91
|
[
"MIT"
] | 3 |
2021-02-23T06:40:35.000Z
|
2022-03-14T23:13:10.000Z
|
gsheetsdb/url.py
|
tim-werner/gsheets-db-api
|
12f2a4fbe1bd5aa36781226759326ce782b08a91
|
[
"MIT"
] | null | null | null |
gsheetsdb/url.py
|
tim-werner/gsheets-db-api
|
12f2a4fbe1bd5aa36781226759326ce782b08a91
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse
FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
| 25.45614 | 63 | 0.626465 |
0a5afdc282108af1d03f7c2caaa0527030efeee6
| 5,178 |
py
|
Python
|
detr/datasets/construction_panoptic.py
|
joyjeni/detr-fine
|
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
|
[
"Apache-2.0"
] | null | null | null |
detr/datasets/construction_panoptic.py
|
joyjeni/detr-fine
|
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
|
[
"Apache-2.0"
] | null | null | null |
detr/datasets/construction_panoptic.py
|
joyjeni/detr-fine
|
dfc0f4abc2579a2b3ef4527904af3345c7a9de4d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
# from util.box_ops import masks_to_boxes
from .construction import make_construction_transforms
import logging
| 30.821429 | 115 | 0.571649 |
0a5cd9823d91b39775866f431a665d36a045cbd2
| 2,450 |
py
|
Python
|
Code/all-starter-code/search.py
|
diyarkudrat/CS-1.3-Core-Data-Structures
|
7d7d48ad7913cded7b0ea75ced144d0a08989924
|
[
"MIT"
] | null | null | null |
Code/all-starter-code/search.py
|
diyarkudrat/CS-1.3-Core-Data-Structures
|
7d7d48ad7913cded7b0ea75ced144d0a08989924
|
[
"MIT"
] | null | null | null |
Code/all-starter-code/search.py
|
diyarkudrat/CS-1.3-Core-Data-Structures
|
7d7d48ad7913cded7b0ea75ced144d0a08989924
|
[
"MIT"
] | null | null | null |
#!python
"""
ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!!
"""
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
return linear_search_iterative(array, item)
# return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
"""Time complexity: O(n) because you iterate through n amount of items in array
Space Complexity: O(n) because there are n amount of items"""
# loop over all array values until item is found
for index, value in enumerate(array): #O(n)
if item == value: #O(1)
return index # found O(1)
return None # not found O(1)
def linear_search_recursive(array, item, index=0):
"""Time complexity: O(n) because you are returning the function continuously until index equals to nth-item
"""
if len(array) <= index:
return index
if array[index] == item:
return index
else:
return linear_search_recursive(array, item, index + 1)
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
return binary_search_iterative(array, item)
# return binary_search_recursive(array, item)
def binary_search_iterative(array, item):
"""Time Complexity: O(log*n) because you are constantly dividing the length of array by 2 until array length is 1
Space Complexity: O(1) """
left, right = 0, len(array) - 1
if len(array) == 0:
return None
while left <= right:
middle = left + (right - left) // 2
if item == array[middle]:
return middle
elif item > array[middle]:
left = middle + 1
else:
right = middle - 1
return None
def binary_search_recursive(array, item, left=None, right=None):
"""Time Complexity: O(log*n)
Space Complexity: 0(log*n) recursion call stack space"""
# TODO: implement binary search recursively here
if left is None and right is None:
left, right = 0, len(array) - 1
middle = left + (right - left) // 2
if left > right:
return None
if array[middle] == item:
return middle
elif item > array[middle]:
return binary_search_recursive(array, item, middle + 1, right)
else:
return binary_search_recursive(array, item, left, middle - 1)
| 27.222222 | 117 | 0.628571 |
0a5d7faf0aee2e49257e320032c83e577c7a4db4
| 2,994 |
py
|
Python
|
max_ai/src/max_ai/mem_db.py
|
mat-heim/max_ros
|
e01e4f5b2db96d94865d80452d41b8dcf1412232
|
[
"Apache-2.0"
] | null | null | null |
max_ai/src/max_ai/mem_db.py
|
mat-heim/max_ros
|
e01e4f5b2db96d94865d80452d41b8dcf1412232
|
[
"Apache-2.0"
] | null | null | null |
max_ai/src/max_ai/mem_db.py
|
mat-heim/max_ros
|
e01e4f5b2db96d94865d80452d41b8dcf1412232
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
'''
memory class
stored in an sqlite database
holds raw input and memories in parse-tagged columns
'''
import sys
import re
import sqlite3
import os
from datetime import date, datetime
from pattern.en import parse
from pattern.en import pprint
from pattern.en import parsetree
from pattern.en import wordnet
from pattern.en import pluralize, singularize
from pattern.en import conjugate, lemma, lexeme
#dir = os.path.dirname(os.path.abspath(__file__))
dir = '/home/erni/catkin_ws/src/max_ros/max_ai/src/max_ai/'
RM = sqlite3.connect(dir +'robbie_memory.sqlite')
#RM = sqlite3.connect(dir + '/data/robbie_memory.db')
cursor = RM.cursor()
# Information about a single concept
# Robbie memory class. Collection of concepts
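# Hedged illustrative sketch only: the original class bodies are not included in
# this excerpt, so the names and schema below (Concept, Memory, a "memory" table
# with raw/tagged columns) are assumptions, not the module's actual API.
class Concept(object):
    """Information about a single concept (hypothetical minimal version)."""
    def __init__(self, raw, tagged):
        self.raw = raw          # raw input text
        self.tagged = tagged    # parse-tagged form, e.g. from pattern.en.parse

class Memory(object):
    """Collection of concepts backed by the sqlite connection above (sketch)."""
    def __init__(self, connection=RM):
        self.connection = connection
        self.connection.execute(
            "CREATE TABLE IF NOT EXISTS memory (raw TEXT, tagged TEXT)")

    def remember(self, sentence):
        """Store a sentence and its parse, returning the new Concept."""
        tagged = parse(sentence)
        self.connection.execute(
            "INSERT INTO memory (raw, tagged) VALUES (?, ?)", (sentence, tagged))
        self.connection.commit()
        return Concept(sentence, tagged)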
| 31.1875 | 108 | 0.58684 |
0a5e25995315baeb1a8d9bd6a0b259803f947416
| 1,768 |
py
|
Python
|
examples/pylab_examples/image_masked.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 16 |
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
examples/pylab_examples/image_masked.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 7 |
2015-05-08T19:36:25.000Z
|
2015-06-30T15:32:17.000Z
|
examples/pylab_examples/image_masked.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 6 |
2015-06-05T03:34:06.000Z
|
2022-01-25T09:07:10.000Z
|
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.
The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''
from pylab import *
from numpy import ma
import matplotlib.colors as colors
delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1) # difference of Gaussians
# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
cmap=palette,
norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)
subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
cmap=palette,
norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
ncolors=256, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
orientation='horizontal', shrink=0.8)
show()
| 31.571429 | 70 | 0.673643 |
0a5ebfcd3225195296a0dbc5f193ada9ab19e141
| 164 |
py
|
Python
|
app/schemas/socket.py
|
d3vzer0/reternal-backend
|
aeeb613c820759212e7aef9150738a66b2882d50
|
[
"MIT"
] | 6 |
2019-01-01T23:38:12.000Z
|
2021-07-27T03:43:11.000Z
|
app/schemas/socket.py
|
d3vzer0/reternal-backend
|
aeeb613c820759212e7aef9150738a66b2882d50
|
[
"MIT"
] | 1 |
2020-08-02T00:21:41.000Z
|
2020-08-02T00:21:41.000Z
|
app/schemas/socket.py
|
d3vzer0/reternal-backend
|
aeeb613c820759212e7aef9150738a66b2882d50
|
[
"MIT"
] | 1 |
2021-07-27T03:43:24.000Z
|
2021-07-27T03:43:24.000Z
|
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
| 20.5 | 48 | 0.804878 |
0a5ef02906722fedfa7e1972d812a70076441239
| 1,548 |
py
|
Python
|
meme/meme.py
|
aniket091/modmail-plugins-1
|
4360ff885f27e5c9488ea5cf9431aff20435209b
|
[
"MIT"
] | 8 |
2020-01-03T19:01:59.000Z
|
2021-04-14T13:30:49.000Z
|
meme/meme.py
|
aniket091/modmail-plugins-1
|
4360ff885f27e5c9488ea5cf9431aff20435209b
|
[
"MIT"
] | 4 |
2020-12-22T12:51:03.000Z
|
2022-01-05T20:17:00.000Z
|
meme/meme.py
|
aniket091/modmail-plugins-1
|
4360ff885f27e5c9488ea5cf9431aff20435209b
|
[
"MIT"
] | 27 |
2020-01-17T18:05:29.000Z
|
2022-02-04T07:38:52.000Z
|
import discord
from discord.ext import commands
import requests
import random
from box import Box
| 30.352941 | 108 | 0.566537 |
0a5f2c5e88f319fb43560833894661a1abbe9435
| 1,934 |
py
|
Python
|
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# 20bdcef0-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
| 46.047619 | 320 | 0.682006 |
0a61c9cfc48e56723e2d98bba70acd01045f443c
| 1,357 |
py
|
Python
|
cv_recommender/account/urls.py
|
hhhameem/CV-Recommender
|
b85d53934f0d888835ab8201be388d7d69f0693d
|
[
"MIT"
] | 1 |
2021-09-14T17:40:17.000Z
|
2021-09-14T17:40:17.000Z
|
cv_recommender/account/urls.py
|
mjohra/Cv-Recommender-Python-Django
|
d231092f7bd989b513210dd6031fb23e28bd5dfe
|
[
"MIT"
] | 1 |
2021-03-31T17:45:15.000Z
|
2021-03-31T17:45:15.000Z
|
cv_recommender/account/urls.py
|
mjohra/Cv-Recommender-Python-Django
|
d231092f7bd989b513210dd6031fb23e28bd5dfe
|
[
"MIT"
] | 1 |
2021-03-31T16:58:50.000Z
|
2021-03-31T16:58:50.000Z
|
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('register/', views.register, name='register'),
path('login/', views.userlogin, name='login'),
path('logout/', views.userlogout, name='logout'),
path('password_change/', auth_views.PasswordChangeView.as_view(),
name='password_change'),
path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(),
name='password_change_done'),
path('password_reset/', auth_views.PasswordResetView.as_view(),
name='password_reset'),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(),
name='password_reset_done'),
path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path('reset/done/', auth_views.PasswordResetCompleteView.as_view(),
name='password_reset_complete'),
path('applicantdashboard/', views.applicantdashboard,
name='applicantdashboard'),
path('recruiterdashboard/', views.recruiterdashboard,
name='recruiterdashboard'),
path('applicantdashboard/profile-edit/', views.applicantedit,
name='editapplicantprofile'),
path('recruiterdashboard/profile-edit/', views.recruiteredit,
name='editrecruiterprofile'),
]
| 45.233333 | 82 | 0.709654 |
0a62f6ea092332203dc81ebef45e051b04506ddf
| 12,246 |
py
|
Python
|
Moodle/scripts/edit_conf.py
|
nii-gakunin-cloud/ocs-templates
|
a2a39bb8824d489488af3c3972007317bb1ef6a2
|
[
"BSD-3-Clause"
] | 4 |
2020-05-11T06:30:53.000Z
|
2022-01-26T03:31:55.000Z
|
Moodle/scripts/edit_conf.py
|
nii-gakunin-cloud/ocs-templates
|
a2a39bb8824d489488af3c3972007317bb1ef6a2
|
[
"BSD-3-Clause"
] | 1 |
2021-06-17T01:34:27.000Z
|
2021-06-17T01:34:27.000Z
|
Moodle/scripts/edit_conf.py
|
nii-gakunin-cloud/ocs-templates
|
a2a39bb8824d489488af3c3972007317bb1ef6a2
|
[
"BSD-3-Clause"
] | 3 |
2020-09-08T00:57:52.000Z
|
2022-01-18T10:42:22.000Z
|
from datetime import datetime
from difflib import unified_diff
from logging import basicConfig, getLogger, INFO
import os
from pathlib import Path
import shutil
import subprocess
import sys
import yaml
from urllib.parse import urlparse
from notebook import notebookapp
from IPython.core.display import HTML
WORKDIR = 'edit'
META_YML = '.vcp-meta.yml'
MOODLE_DIR = '/opt/moodle'
CONF_RELATIVE = '/etc'
ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE']
logger = getLogger(__name__)
basicConfig(level=INFO, format='%(message)s')
| 34.59322 | 79 | 0.682182 |
0a63b2be4d7b2116c7bb45a2e0a6f93a06e01c5e
| 959 |
py
|
Python
|
other/minimum_edit_distance.py
|
newvicklee/nlp_algorithms
|
d2812398d96d345dcb50970bae6ebbf666ea5380
|
[
"MIT"
] | null | null | null |
other/minimum_edit_distance.py
|
newvicklee/nlp_algorithms
|
d2812398d96d345dcb50970bae6ebbf666ea5380
|
[
"MIT"
] | null | null | null |
other/minimum_edit_distance.py
|
newvicklee/nlp_algorithms
|
d2812398d96d345dcb50970bae6ebbf666ea5380
|
[
"MIT"
] | null | null | null |
"""
Minimum edit distance computes the minimum cost of transforming one string into another.
This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions.
Resource: https://en.wikipedia.org/wiki/Edit_distance
For example, getting from "intention" to "execution" is a cost of 8.
minimum_edit_distance("intention", "execution")
# 8
"""
| 28.205882 | 129 | 0.535975 |
0a65447ee836106ce8cee612e580a711dcd38121
| 7,219 |
py
|
Python
|
varifier/dnadiff.py
|
iqbal-lab-org/varifier
|
718a787fd8490ea33a79b5095884e66e12106399
|
[
"MIT"
] | 11 |
2020-04-06T11:22:50.000Z
|
2021-11-12T18:09:41.000Z
|
varifier/dnadiff.py
|
martinghunt/varifier
|
9f05477b5e48e96264c392fbd14ca98d1ed86e48
|
[
"MIT"
] | 17 |
2020-04-01T15:19:55.000Z
|
2021-11-12T05:07:01.000Z
|
varifier/dnadiff.py
|
martinghunt/varifier
|
9f05477b5e48e96264c392fbd14ca98d1ed86e48
|
[
"MIT"
] | 3 |
2020-04-01T10:41:27.000Z
|
2020-08-05T06:27:21.000Z
|
from operator import attrgetter
import logging
import os
import shutil
import subprocess
import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils
# We only want the .snps file from MUMmer's dnadiff script. From reading
# the docs and inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.
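# Hedged sketch of how those three commands could be chained with subprocess.
# The function name and output file naming below are illustrative assumptions;
# the module's real runner is not shown in this excerpt.
def _sketch_run_mummer_pipeline(ref_fasta, query_fasta, outprefix):
    delta = f"{outprefix}.delta"
    filtered_delta = f"{outprefix}.1delta"
    snps = f"{outprefix}.snps"
    subprocess.check_call(
        f"nucmer --maxmatch --delta {delta} {ref_fasta} {query_fasta}", shell=True
    )
    with open(filtered_delta, "w") as f:
        subprocess.check_call(f"delta-filter -1 {delta}", shell=True, stdout=f)
    with open(snps, "w") as f:
        subprocess.check_call(f"show-snps -rlTHC {filtered_delta}", shell=True, stdout=f)
    return snps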
def _snps_file_to_vcf(snps_file, query_fasta, outfile):
"""Loads the .snps file made by dnadiff.
query_fasta = fasta file of query sequences.
    Writes a new VCF file of unmerged records."""
vcf_records = {}
variants = pymummer.snp_file.get_all_variants(snps_file)
query_seqs = utils.file_to_dict_of_seqs(query_fasta)
for variant in variants:
# If the variant is reversed, it means that either the ref or query had to be
# reverse complemented when aligned by mummer. Need to do the appropriate
# reverse (complement) fixes so the VCF has the correct REF and ALT sequences
if variant.reverse:
qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
qry_seq.revcomp()
variant.qry_base = "".join(reversed(qry_seq.seq))
ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
ref_seq.revcomp()
variant.ref_base = ref_seq.seq
if variant.var_type == pymummer.variant.SNP:
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
variant.qry_base,
variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_SNP",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.DEL:
# The query has sequence missing, compared to the
# reference. We're making VCF records w.r.t. the
# query, so this is an insertion. So need to
# get the nucleotide before the insertion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start + 1),
".",
query_seqs[variant.qry_name][variant.qry_start],
query_seqs[variant.qry_name][variant.qry_start]
+ variant.ref_base,
".",
".",
"SVTYPE=DNADIFF_INS",
"GT",
"1/1",
]
)
)
elif variant.var_type == pymummer.variant.INS:
# The ref has sequence missing, compared to the
# query. We're making VCF records w.r.t. the
# query, so this is a deletion. So need to
# get the nucleotide before the deletion as well.
new_record = vcf_record.VcfRecord(
"\t".join(
[
variant.qry_name,
str(variant.qry_start),
".",
query_seqs[variant.qry_name][variant.qry_start - 1]
+ variant.qry_base,
query_seqs[variant.qry_name][variant.qry_start - 1],
".",
".",
"SVTYPE=DNADIFF_DEL",
"GT",
"1/1",
]
)
)
else:
raise Exception("Unknown variant type: " + str(variant))
assert (
new_record.REF
== query_seqs[new_record.CHROM][
new_record.POS : new_record.POS + len(new_record.REF)
]
)
if new_record.CHROM not in vcf_records:
vcf_records[new_record.CHROM] = []
vcf_records[new_record.CHROM].append(new_record)
for vcf_list in vcf_records.values():
vcf_list.sort(key=attrgetter("POS"))
with open(outfile, "w") as f:
print("##fileformat=VCFv4.2", file=f)
for seq in query_seqs.values():
print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)
for key, vcf_list in sorted(vcf_records.items()):
for record in vcf_list:
print(record, file=f)
| 34.37619 | 95 | 0.543289 |
0a658f2185402efce42f9a0cf262eb928b7b63f0
| 1,650 |
py
|
Python
|
modules/models.py
|
sbj-ss/github-watcher
|
7d7c4d2a0a6a014b93a2168dc6e508b2b867a414
|
[
"MIT"
] | null | null | null |
modules/models.py
|
sbj-ss/github-watcher
|
7d7c4d2a0a6a014b93a2168dc6e508b2b867a414
|
[
"MIT"
] | null | null | null |
modules/models.py
|
sbj-ss/github-watcher
|
7d7c4d2a0a6a014b93a2168dc6e508b2b867a414
|
[
"MIT"
] | null | null | null |
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Type
| 25 | 109 | 0.577576 |
0a6616a10563e4ebc6f0a75abad1fbf54a72a196
| 2,776 |
py
|
Python
|
queryfilter/datetimefilter.py
|
iCHEF/queryfilter
|
0ae4faf525e162d2720d328b96fa179d68277f1e
|
[
"Apache-2.0"
] | 4 |
2018-05-11T18:07:32.000Z
|
2019-07-30T13:38:49.000Z
|
queryfilter/datetimefilter.py
|
iCHEF/queryfilter
|
0ae4faf525e162d2720d328b96fa179d68277f1e
|
[
"Apache-2.0"
] | 6 |
2018-02-26T04:46:36.000Z
|
2019-04-10T06:17:12.000Z
|
queryfilter/datetimefilter.py
|
iCHEF/queryfilter
|
0ae4faf525e162d2720d328b96fa179d68277f1e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import datetime
from dateutil import parser
import pytz
from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter
WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)
min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)
| 25.46789 | 72 | 0.648055 |
0a6634c8b3d57a247c912406564142afedbbeba0
| 13,829 |
py
|
Python
|
built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 12 |
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 3 |
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/operator/rpn.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 2 |
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Import all torch operators."""
import torch.nn.functional as F
import torch.nn as nn
import torch
from vega.search_space.networks.network_factory import NetworkFactory
from vega.search_space.networks.net_utils import NetTypes
from vega.search_space.networks.pytorch.utils.anchor_utils.anchor_target import AnchorTarget
from vega.search_space.networks.pytorch.utils.bbox_utils.anchor_generator import AnchorGenerator
from vega.core.common.config import Config
from functools import partial
import numpy as np
from six.moves import map, zip
from vega.search_space.networks.pytorch.losses.reduce_loss import weighted_loss
def multi_apply(func, *args, **kwargs):
"""Multi apply.
:param func: function
:param args: args of function
:return: result
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
"""Cross entropy losses.
:param pred: predict result
:param label: gt label
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
loss = F.cross_entropy(pred, label, reduction='none')
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_binary_labels(labels, label_weights, label_channels):
"""Expand binary labels.
:param labels: labels
:param label_weights: label weights
:param label_channels: label channels
:return: binary label and label weights
"""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
"""Binary cross entropy loss.
:param pred: predict result
:param label: gt label
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight, reduction='none')
loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
"""Mask cross entropy loss.
:param pred: predict result
:param target: target
:param label: gt label
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Weight reduce loss.
:param loss: losses
:param weight: weight
:param reduction: reduce function
:param avg_factor: avg factor
:return: loss
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
if reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def reduce_loss(loss, reduction):
"""Reduce loss compute.
:param loss: losses
:param reduction: reduce funtion
:return: loss
"""
reduction_function = F._Reduction.get_enum(reduction)
if reduction_function == 0:
return loss
elif reduction_function == 1:
return loss.mean()
elif reduction_function == 2:
return loss.sum()
| 37.991758 | 116 | 0.653048 |
0a6637af877e66a30d055aa9bfab27307de91c10
| 5,292 |
py
|
Python
|
scrapy/http/request/__init__.py
|
joybhallaa/scrapy
|
e4750f2fbdacbeb7a20ae7c6b13bba3fb0f7ad54
|
[
"BSD-3-Clause"
] | 1 |
2020-04-18T16:48:49.000Z
|
2020-04-18T16:48:49.000Z
|
scrapy/http/request/__init__.py
|
Venfox/scrapy
|
cf39602c3038d576e14c20a2ac22f88006deb63b
|
[
"BSD-3-Clause"
] | null | null | null |
scrapy/http/request/__init__.py
|
Venfox/scrapy
|
cf39602c3038d576e14c20a2ac22f88006deb63b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from w3lib.url import safe_url_string
from scrapy.http.headers import Headers
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import escape_ajax
from scrapy.http.common import obsolete_setter
from scrapy.utils.curl import curl_to_request_kwargs
url = property(_get_url, obsolete_setter(_set_url, 'url'))
body = property(_get_body, obsolete_setter(_set_body, 'body'))
__repr__ = __str__
def copy(self):
"""Return a copy of this Request"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Request with the same attributes except for those
given new values.
"""
for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags',
'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop('cls', self.__class__)
return cls(*args, **kwargs)
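# Hedged usage sketch (not part of this module) of how copy() and replace()
# are typically called from user code; the URL below is a placeholder.
from scrapy import Request

original = Request("https://example.com/page")
duplicate = original.copy()                    # identical attributes, new object
retried = original.replace(dont_filter=True)   # same request, bypassing the dupe filter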
| 38.071942 | 102 | 0.634732 |