Column schema (dtype and observed length range or number of distinct values; "nullable" marks columns that contain nulls):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40-40 |
| directory_id | string | length 40-40 |
| path | string | length 3-616 |
| content_id | string | length 40-40 |
| detected_licenses | sequence | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40-40 |
| revision_id | string | length 40-40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1-1 |
| author_id | string | length 1-132 |
cee8539e090be64a5f7173b3a057803ae9d66c8f | 055e5a9b64e53d0a87d07ce724fdb05eadc00a8e | /jmatcher/users/migrations/0019_merge_20170410_0521.py | 7057179ad7b2519ed202fd18cd66ab435f2bc945 | [
"MIT"
] | permissive | jamesaud/se1-group4 | 73e73cfe6f56a1d1f1b256f13230a28f8d272f6c | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | refs/heads/master | 2021-01-09T05:41:11.686168 | 2017-04-27T17:13:17 | 2017-04-27T17:13:17 | 80,780,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-10 05:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0015_auto_20170409_1556'),
('users', '0018_user_short_description'),
]
operations = [
]
| [
"[email protected]"
] | |
5670c82a43a643f5d733543fb0552927320696bb | 832f86e052d90916fb0c8156825c87dc13c0443e | /imported-from-gmail/2020-01-16-fix-brackets.py | 05fd311f1316bc6c204495b94f6525133aaf14b7 | [] | no_license | johncornflake/dailyinterview | 292615849cea62cb945ecc7039c594b6966a81f3 | 91bb0edb9e25255e6222279109c15ae9d203970c | refs/heads/master | 2022-12-09T21:02:12.204755 | 2021-06-07T13:09:34 | 2021-06-07T13:09:34 | 225,059,833 | 0 | 0 | null | 2022-12-08T11:27:38 | 2019-11-30T19:24:58 | Python | UTF-8 | Python | false | false | 611 | py | Hi, here's your problem today. This problem was recently asked by Twitter:
Given a string with only '(' and ')', find the minimum number of characters to add or subtract to fix the string such that the brackets are balanced.
Example:
Input: '(()()'
Output: 1
Explanation:
The fixed string could either be '()()' by deleting the first bracket, or '(()())' by adding a bracket. These are not the only ways of fixing the string; there are many other ways, such as adding a bracket in different positions!
Here's some code to start with:
def fix_brackets(s):
    # Fill this in.
print fix_brackets('(()()')
# 1
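One way to fill in the stub is a single counting scan; the sketch below is an editorial addition, not part of the original email:
def fix_brackets(s):
    open_count = 0  # unmatched '(' seen so far
    fixes = 0  # ')' with nothing to match; each costs one added '(' or one deletion
    for ch in s:
        if ch == '(':
            open_count += 1
        elif open_count > 0:
            open_count -= 1
        else:
            fixes += 1
    return fixes + open_count  # each leftover '(' also costs one fix
print(fix_brackets('(()()'))  # 1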
| [
"[email protected]"
] | |
e75dd8c6b2b4b77b7eb7778f390b6182c92f1b16 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03696/s991573247.py | 2a4297734905538666e9564192e9f911fa2b72ac | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
#from heapq import heappop, heappush
#from collections import defaultdict
sys.setrecursionlimit(10**7)
#import math
#from itertools import product, accumulate, combinations, product
#import bisect
#import numpy as np
#from copy import deepcopy
#from collections import deque
#from decimal import Decimal
#from numba import jit
INF = 1 << 50
def run():
N = int(input())
S = input()
position = 0
l = 0
r = 0
for s in S:
if s == '(':
position += 1
else:
position -= 1
if position < 0:
l += abs(position)
position = 0
r += position
print(l * '(' + S + r * ')')
if __name__ == "__main__":
run()
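# Editorial note (not part of the original submission): in run() above, `l` counts the
# ')' characters that arrive while `position` is already 0 -- each one needs a '('
# prepended -- and the final value of `position` becomes `r`, the number of unmatched
# '(' that need ')' appended. For example, S = ")(())" gives l = 1, r = 0, and the
# program prints "()(())".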
| [
"[email protected]"
] | |
5ff59f2a7c0c5bfe42bfba11cc687c1a6f58470f | e1d6de1fb5ce02907df8fa4d4e17e61d98e8727d | /corpora/corpus_reader.py | 87865c2db4af9e8d3a0af9f33d532c0e28e798b8 | [] | no_license | neuroph12/nlpy | 3f3d1a8653a832d6230cb565428ee0c77ef7451d | 095976d144dacf07414bf7ee42b811eaa67326c1 | refs/heads/master | 2020-09-16T08:24:37.381353 | 2016-09-10T19:24:05 | 2016-09-10T19:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | import nltk
from nltk.corpus import gutenberg
emma = gutenberg.words('austen-emma.txt')
print(len(emma))
emma = nltk.Text(gutenberg.words('austen-emma.txt'))
print(emma.concordance('surprize'))
# raw = gutenberg.raw("burgess-busterbrown.txt")
# print(raw[1:20])
# words
# words = gutenberg.words("burgess-busterbrown.txt")
# print(words[1:20])
# sents = gutenberg.sents("burgess-busterbrown.txt")
# print(sents[1:20])
for fileid in gutenberg.fileids():
num_chars = len(gutenberg.raw(fileid))
num_words = len(gutenberg.words(fileid))
num_sents = len(gutenberg.sents(fileid))
num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
print(round(num_chars / num_words), round(num_words / num_sents), round(num_words / num_vocab), fileid) | [
"[email protected]"
] | |
672f0214711e47b569a7596461b9befa37e15306 | 150d9e4cee92be00251625b7f9ff231cc8306e9f | /PathSumII.py | dfcf36fff64e9e02cdc4acebd71dc0fec6b03159 | [] | no_license | JerinPaulS/Python-Programs | 0d3724ce277794be597104d9e8f8becb67282cb0 | d0778178d89d39a93ddb9b95ca18706554eb7655 | refs/heads/master | 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | '''
Path Sum II
Given the root of a binary tree and an integer targetSum, return all root-to-leaf paths where each path's sum equals targetSum.
A leaf is a node with no children.
Example 1:
Input: root = [5,4,8,11,null,13,4,7,2,null,null,5,1], targetSum = 22
Output: [[5,4,11,2],[5,8,4,5]]
Example 2:
Input: root = [1,2,3], targetSum = 5
Output: []
Example 3:
Input: root = [1,2], targetSum = 0
Output: []
Constraints:
The number of nodes in the tree is in the range [0, 5000].
-1000 <= Node.val <= 1000
-1000 <= targetSum <= 1000
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def pathSum(self, root, targetSum):
"""
:type root: TreeNode
:type targetSum: int
:rtype: List[List[int]]
"""
result = []
current = []
def dfs(root, target, current):
if root is None:
return False
current.append(root.val)
if root.left is None and root.right is None:
if target == root.val:
result.append(current)
return True
dfs(root.left, target - root.val, current[:])
dfs(root.right, target - root.val, current[:])
return
dfs(root, targetSum, current)
return result
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)
root.right.left = TreeNode(13)
root.left.left.left = TreeNode(7)
root.left.left.right = TreeNode(2)
root.right.right = TreeNode(4)
root.right.right.left = TreeNode(5)
root.right.right.right = TreeNode(1)
obj = Solution()
print(obj.pathSum(root, 22)) | [
"[email protected]"
] | |
da4a77a1a728a608f6a7aba60efcbd04b675bee6 | 3088dc21f3e5eeb31575704712a695d71772495f | /torch/distributed/fsdp/_optim_utils.py | 601db958c3fe63a83b2a26dd2d61a7a842d25dbf | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | ezyang/pytorch | 8e82444c78025ff12fa605a62a18acfc43b176b8 | 6fc64500d1af9ee1306c7695ab0a2ff01852ff00 | refs/heads/master | 2023-09-02T01:56:28.108400 | 2022-06-07T19:13:21 | 2022-06-07T19:13:21 | 101,798,885 | 3 | 0 | NOASSERTION | 2022-08-23T22:10:07 | 2017-08-29T19:28:39 | C++ | UTF-8 | Python | false | false | 42,568 | py | import copy
import functools
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
import torch
import torch.distributed as dist
# Import the entire FSDP file to avoid circular imports
import torch.distributed.fsdp.fully_sharded_data_parallel as FSDP
from torch.distributed.fsdp.flatten_params_wrapper import FlatParameter
class _ConsolidatedOptimState:
"""
This holds the consolidated optimizer state on the target rank. Positive-
dimension tensor state is communicated across ranks, while zero-dimension
tensor state and non-tensor state is taken directly from the target rank.
PyTorch version 1.12 moved to using zero-dimension tensors for scalar
values, but user implemented optimizers may still use float (i.e. a
non-tensor). Thus, we support both and handle them identically.
Attributes:
tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
tensor state name to the unsharded flattened tensor representing
the state.
zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
dimension tensor state name to its value.
non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
name to its value.
"""
tensor_state: Dict[str, torch.Tensor] = {}
zero_dim_tensor_state: Dict[str, torch.Tensor] = {}
non_tensor_state: Dict[str, Any] = {}
class _PosDimTensorInfo(NamedTuple):
"""
Meatadata for positive-dimension tensors used internally for
:meth:`scatter_full_optim_state_dict`.
Attributes:
shape (torch.Size): Sharded tensor shape (which is equal to the
unsharded tensor shape if the tensor is optimizer state for a
non-FSDP parameter and is hence not sharded).
dtype (torch.dtype): Data type of the tensor.
"""
shape: torch.Size
dtype: torch.dtype
class _OptimStateKey(NamedTuple):
"""
This represents an optimizer state key that may be used commonly across
ranks. It is based on the unflattened parameter names rather than parameter
IDs to make it indepenendent of each rank's own optimizer construction.
"""
unflat_param_names: Tuple[str, ...]
is_flat_param: bool
def _unflatten_optim_state(
flat_param: FlatParameter,
flat_param_state: Dict[str, Any],
fsdp_module,
to_save: bool,
) -> List[Dict[str, Any]]:
"""
Unflattens the optimizer state, consisting of the "state" part and the
"param_groups" part. Unflattening the "state" part involves consolidating
the state on the target rank and remapping from flattened to unflattened
parameter IDs, and the "param_groups" part only involves remapping from
flattened to unflattened parameter IDs.
Args:
flat_param (FlatParameter): The flattened parameter.
flat_param_state (Dict[str, Any]): Entry for the flattened parameter
in the "state" part of the optimizer state dict.
fsdp_module (FullyShardedDataParallel): FSDP module that owns
``flat_param``, i.e. holds it in ``self.params``.
to_save (bool): Whether to save the state on this rank.
Returns:
List[Dict[str, Any]]: A :class:`list` holding the entries in the
"state" part of the optimizer state dict corresponding to the
unflattened parameters comprising the flattened parameter
``flat_param`` if on the target rank or an empty :class:`list`
otherwise. The final optimizer state dict will need to map these
entries using the proper unflattened parameter IDs.
"""
consolidated_state = _communicate_optim_state(
flat_param, flat_param_state, fsdp_module, to_save,
)
unflat_param_state = _unflatten_communicated_optim_state(
flat_param,
consolidated_state,
) if to_save else []
return unflat_param_state
def _communicate_optim_state(
flat_param: FlatParameter,
flat_param_state: Dict[str, Any],
fsdp_module,
to_save: bool,
) -> _ConsolidatedOptimState:
"""
Communicates the optimizer state for a flattened parameter ``flat_param``
across ranks so that the target rank holds the entire non-sharded optimizer
state.
If ``N`` is the number of tensor optimizer states in the optimizer state
dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
otherwise (where the plus 1 comes from all-gathering the padding per rank).
Args:
flat_param (FlatParameter): The flattened parameter.
flat_param_state (Dict[str, Any]): The entry in the "state" part of the
optimizer state dict corresponding to the flattened parameter.
fsdp_module (FullyShardedDataParallel): FSDP module that owns
``flat_param``, i.e. holds it in ``self.params``.
to_save (bool): Whether to save the state on this rank.
Returns:
ConsolidatedOptimState: Consolidated optimizer state for
``flat_param``; the state is not populated for non-target ranks.
"""
state = _ConsolidatedOptimState()
tensor_state, zero_dim_tensor_state, non_tensor_state = \
state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state
group = fsdp_module.process_group
tensor_buffer = None # initialize lazily in case it is not needed
for state_name, value in flat_param_state.items():
# Positive-dimension tensor state: communicate across ranks
if torch.is_tensor(value) and value.dim() > 0:
# If the parameter is not sharded (e.g. world size of 1), then
# neither is the positive-dimension tensor state, so no need to
# communicate it -- we take the target rank's value
if not flat_param._is_sharded:
tensor_state[state_name] = value.cpu()
continue
if tensor_buffer is None:
# Assume that positive-dimension tensor optimizer state
# has the same shape as the sharded flattened parameter
buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
tensor_buffer = value.new_zeros(*buffer_size)
dist._all_gather_base(tensor_buffer, value, group=group)
if to_save:
assert hasattr(flat_param, "_orig_size"), \
"Sharded flattened parameter should have `_orig_size` set"
unpadded_numel = flat_param._orig_size.numel() # type: ignore[attr-defined]
tensor_state[state_name] = tensor_buffer[:unpadded_numel].cpu()
# Zero-dimension tensor state and non-tensor state: take this rank's
# value directly
elif to_save:
if _is_zero_dim_tensor(value):
zero_dim_tensor_state[state_name] = value.cpu()
else:
non_tensor_state[state_name] = value
return state
def _unflatten_communicated_optim_state(
flat_param: FlatParameter,
state: _ConsolidatedOptimState,
) -> List[Dict[str, Any]]:
"""
Unflattens the communicated optimizer state (given by ``tensor_state``,
``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flattened
parameter ``flat_param``. This should only be called on the target rank.
Args:
flat_param (FlatParameter): The flattened parameter.
state (_ConsolidatedOptimState): Consolidated optimizer state.
Returns:
List[Dict[str, Any]]: A :class:`list` holding the entries in the
"state" part of the optimizer state dict corresponding to the
unflattened parameters comprising the flattened parameter
``flat_param``. The final optimizer state dict will need to map these
entries using the proper unflattened parameter IDs.
"""
unflat_param_state: List[Dict[str, Any]] = []
flat_param_views: Dict[str, Iterator] = {}
num_unflat_params = flat_param._num_unflattened_params
tensor_state, zero_dim_tensor_state, non_tensor_state = \
state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state
for _ in range(num_unflat_params):
unflat_state_param = {}
# Add positive-dimension tensor state: unflatten with views
for state_name, flat_tensor in tensor_state.items():
views_generated = state_name in flat_param_views
if not views_generated:
param_views = flat_param.get_param_views(flat_tensor)
flat_param_views[state_name] = param_views
else:
param_views = flat_param_views[state_name]
unflat_state_param[state_name] = next(param_views)
# Add zero-dimension tensor state: take the target rank's value
for state_name, zero_dim_tensor in zero_dim_tensor_state.items():
unflat_state_param[state_name] = zero_dim_tensor
# Add non-tensor state: take the target rank's value
for state_name, non_tensor in non_tensor_state.items():
unflat_state_param[state_name] = non_tensor
unflat_param_state.append(unflat_state_param)
return unflat_param_state
def _flatten_full_optim_state_dict(
full_optim_state_dict: Dict[str, Any],
model: torch.nn.Module,
shard_state: bool,
) -> Dict[str, Any]:
"""
Flattens the full optimizer state dict, still keying by unflattened
parameter names. If ``shard_state=True``, then FSDP-managed
``FlatParameter`` 's optimizer states are sharded, and otherwise, they are
kept unsharded.
Returns:
Dict[str, Any]: The flattened optimizer state dict.
"""
full_osd = full_optim_state_dict
if "state" not in full_osd or "param_groups" not in full_osd:
raise ValueError(
"`full_optim_state_dict` must have the keys \"state\" and "
"\"param_groups\" to be a valid optimizer state dict"
)
flat_param_to_fsdp_module = _get_flat_param_to_fsdp_module(model)
param_to_unflat_param_names = FSDP._get_param_to_unflat_param_names(model)
# Construct the "state" part
flat_osd_state: Dict[_OptimStateKey, Any] = {}
full_osd_state = full_osd["state"]
for param, unflat_param_names in param_to_unflat_param_names.items():
if isinstance(param, FlatParameter): # flatten FSDP parameters' states
assert param in flat_param_to_fsdp_module, \
"Check the `flat_param_to_fsdp_module` construction\n" \
f"param: {param}"
fsdp_module = flat_param_to_fsdp_module[param]
flat_state = _flatten_optim_state(
full_osd_state, unflat_param_names, fsdp_module, param,
shard_state,
)
key = _OptimStateKey(tuple(unflat_param_names), True)
flat_osd_state[key] = flat_state
else: # do not flatten non-FSDP parameters' states
assert len(unflat_param_names) == 1
unflat_param_name = unflat_param_names[0]
if unflat_param_name not in full_osd_state:
# The state dict may not have an entry for a parameter if it
# was not passed into the optimizer (e.g. if it is not an
# FSDP-managed parameter)
continue
key = _OptimStateKey(tuple(unflat_param_names), False)
flat_osd_state[key] = copy.copy(full_osd_state[unflat_param_name])
# Construct the "param_groups" part -- copy as is since it will be
# rekeyed later according to the target rank's `optim_input`
flat_osd_param_groups = copy.deepcopy(full_osd["param_groups"])
return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
def _flatten_optim_state(
unflat_osd_state: Dict[str, Dict[str, Any]],
unflat_param_names: List[str],
fsdp_module,
flat_param: FlatParameter,
shard_state: bool,
) -> Dict[str, Any]:
"""
Flattens the optimizer state in ``full_optim_state_dict`` for a single
flattened parameter ``flat_param`` in ``fsdp_module`` corresponding to
the unflattened parameter names in ``unflat_param_names``.
Args:
unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
optimizer state dict corresponding to the unflattened parameters.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the flattened parameter
``flat_param``.
fsdp_module (FullyShardedDataParallel): FSDP module owning the
flattened parameter.
flat_param (FlatParameter): The flattened parameter.
shard_state (bool): Whether to shard flattened positive-dimension
tensor state; if ``False``, then the full flattened tensor is
kept in the returned :class:`dict.
Returns:
Dict[str, Any]: A :class:`dict` mapping state names to their values for
a particular flattened parameter. The sharded optimizer state dict's
"state" part will map a key to this returned value.
"""
num_unflat_params = len(unflat_param_names)
assert num_unflat_params > 0, \
"Expects at least one unflattened parameter corresponding to the " \
"flattened parameter"
unflat_param_shapes = flat_param._param_shapes
num_unflat_param_shapes = len(unflat_param_shapes)
assert num_unflat_params == num_unflat_param_shapes, \
f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"
# Check if these unflattened parameters have any optimizer state
has_state = [
bool(unflat_param_name in unflat_osd_state)
for unflat_param_name in unflat_param_names
]
# If none of the unflattened parameters comprising this flattened parameter
# have any state, then we do not want an entry in the optimizer state dict
if not any(has_state):
return {} # no need to flatten any state
# There may still be some unflattened parameters with state and some
# without
unflat_param_states = [
unflat_osd_state[unflat_param_name]
if unflat_param_name in unflat_osd_state else None
for unflat_param_name in unflat_param_names
]
# Check that the unflattened parameters have the same state names
state_names = None
for unflat_param_state in unflat_param_states:
if unflat_param_state is None:
continue
if state_names is None:
state_names = set(unflat_param_state.keys())
else:
if state_names != set(unflat_param_state.keys()):
raise ValueError(
"Differing optimizer state names for the unflattened "
f"parameters: {unflat_param_names}"
)
assert state_names is not None
# Flatten the state
flat_state: Dict[str, Any] = {}
for state_name in state_names:
state_values = [
unflat_param_state[state_name]
if unflat_param_state is not None else None
for unflat_param_state in unflat_param_states
]
non_none_state_values = [v for v in state_values if v is not None]
are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
for v in non_none_state_values:
are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
are_zero_dim_tensors &= _is_zero_dim_tensor(v)
are_non_tensors &= not torch.is_tensor(v)
types = set(type(v) for v in non_none_state_values)
if len(types) != 1 or not (
are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
):
raise ValueError(
f"Differing optimizer state types for state {state_name}, "
f"values {non_none_state_values}, and unflattened parameter "
f"names {unflat_param_names}"
)
if are_pos_dim_tensors:
flat_tensor = _flatten_tensor_optim_state(
state_name, state_values, unflat_param_names,
unflat_param_shapes, flat_param,
)
if shard_state:
# Shard the flattened tensor immediately to minimize max memory
# usage
sharded_flat_tensor, _ = fsdp_module._get_shard(flat_tensor)
flat_state[state_name] = sharded_flat_tensor
else:
flat_state[state_name] = flat_tensor
elif are_zero_dim_tensors:
flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
state_name, state_values, unflat_param_names,
)
else:
assert are_non_tensors
flat_state[state_name] = _flatten_non_tensor_optim_state(
state_name, state_values, unflat_param_names,
)
return flat_state
def _flatten_tensor_optim_state(
state_name: str,
pos_dim_tensors: List[torch.Tensor],
unflat_param_names: List[str],
unflat_param_shapes: List[torch.Size],
flat_param: FlatParameter,
) -> torch.Tensor:
"""
Flattens the positive-dimension tensor optimizer state given by the values
``tensors`` for the state ``state_name`` for a single flattened parameter
``flat_param`` corresponding to the unflattened parameter names
``unflat_param_names`` and unflatted parameter shapes
``unflat_param_shapes``. This flattens each unflattened parameter's tensor
state into one tensor.
NOTE: We use zero tensors for any unflattened parameters without state
since some value is required to fill those entries. This assumes that the
zero tensor is mathematically equivalent to having no state, which is true
for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
optimizers.
Args:
state_name (str): Optimizer state name.
pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
optimizer state values for the unflattened parameters corresponding
to the single flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
unflat_param_shapes (List[torch.Size]): Unflattened parameter shapes
corresponding to the single flattened parameter.
flat_param (FlatParameter): The flattened parameter.
Returns:
torch.Tensor: A flattened tensor containing the optimizer state
corresponding to ``state_name`` constructed by concatenating the
unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
tensors for any unflattened parameters without the state).
"""
non_none_tensors = [t for t in pos_dim_tensors if t is not None]
# Check that all are tensors with the same dtype
dtypes = set(t.dtype for t in non_none_tensors)
if len(dtypes) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have positive-dimension tensor state with the "
f"same dtype but got dtypes {dtypes} for state {state_name} and "
f"unflattened parameter names {unflat_param_names}"
)
dtype = next(iter(dtypes))
# Check that each tensor state matches its parameter's shape
for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
if tensor is None and len(shape) == 0:
raise ValueError(
"Flattening a zero-dimension parameter is not supported"
)
elif tensor is not None and tensor.shape != shape:
raise ValueError(
"Tensor optimizer state does not have same shape as its "
f"parameter: {tensor.shape} {shape}"
)
# Flatten the tensor states: we do not need to add any padding since the
# flattened optimizer state tensor sharded via `_get_shard()`, which pads
# the shard as needed (just like for the flattened parameter)
cpu_device = torch.device("cpu")
tensors = [
torch.flatten(state_value.to(cpu_device)) if state_value is not None
else torch.flatten(torch.zeros(
size=shape, dtype=dtype, device=cpu_device,
))
for state_value, shape
in zip(pos_dim_tensors, unflat_param_shapes)
]
flat_tensor = torch.cat(tensors)
flat_param_shape = flat_param._orig_size # type: ignore[attr-defined]
assert flat_tensor.shape == flat_param_shape, \
f"tensor optim state: {flat_tensor.shape} " \
f"flattened parameter: {flat_param_shape}"
return flat_tensor
def _flatten_zero_dim_tensor_optim_state(
state_name: str,
zero_dim_tensors: List[torch.Tensor],
unflat_param_names: List[str],
) -> torch.Tensor:
"""
Flattens the zero-dimension tensor optimizer state given by the values
``zero_dim_tensors`` for the state ``state_name`` for a single flattened
parameter corresponding to the unflattened parameter names
``unflat_param_names`` by enforcing that all tensors are the same and using
that common value.
NOTE: The requirement that the tensors are the same across all unflattened
parameters comprising the flattened parameter is needed to maintain the
invariant that FSDP performs the same computation as its non-sharded
equivalent. This means that none of the unflattened parameters can be
missing this state since imposing a value may differ from having no value.
For example, for Adam's "step", no value means maximum bias correction,
while having some positive value means less bias correction.
Args:
state_name (str): Optimizer state name.
zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
for the unflattened parameters corresponding to the single
flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
Returns:
torch.Tensor: A zero-dimensional tensor giving the value of the state
``state_name`` for all unflattened parameters corresponding to the
names ``unflat_param_names``.
"""
non_none_tensors = [t for t in zero_dim_tensors if t is not None]
# Enforce that all have the same value and dtype
values_set = set(t.item() if t is not None else None for t in zero_dim_tensors)
dtypes = set(t.dtype if t is not None else None for t in zero_dim_tensors)
if len(non_none_tensors) != len(zero_dim_tensors) or \
len(values_set) != 1 or len(dtypes) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have scalar state with the same value and dtype "
f"but got values {values_set} and dtypes {dtypes} for state "
f"{state_name} and unflattened parameter names "
f"{unflat_param_names}"
)
value = next(iter(values_set))
dtype = next(iter(dtypes))
return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
def _flatten_non_tensor_optim_state(
state_name: str,
non_tensors: List[Any],
unflat_param_names: List[str],
) -> Any:
"""
Flattens the non-tensor optimizer state given by the values ``non_tensors``
for the state ``state_name`` for a single flattened parameter corresponding
to the unflattened parameter names ``unflat_param_names`` by enforcing that
all values are the same and using that common value.
See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
Args:
state_name (str): Optimizer state name.
non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
parameters corresponding to the single flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
Returns:
Any: A non-tensor giving the value of the state ``state_name`` for all
unflattened parameters corresponding to the names
``unflat_param_names``.
"""
non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
# Enforce that all have the same value (same type already checked)
non_tensor_set = set(non_tensors)
if len(non_none_non_tensors) != len(non_tensors) or \
len(non_tensor_set) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have scalar state with the same value and dtype "
f"but got values {non_tensor_set} for state {state_name} and "
f"unflattened parameter names {unflat_param_names}"
)
non_tensor = next(iter(non_tensor_set))
return non_tensor
def _process_pos_dim_tensor_state(
flat_optim_state_dict: Dict[str, Any],
world_size: int,
) -> Dict[str, Any]:
"""
Processes positive-dimension tensor states in ``flat_optim_state_dict`` by
replacing them with metadata. This is done so the processed optimizer state
dict can be broadcast from rank 0 to all ranks without copying those tensor
states, and thus, this is meant to only be called on rank 0.
Args:
flat_optim_state_dict (Dict[str, Any]): Flattened optimizer state dict
with the positive-dimension tensor states unsharded.
Returns:
Dict[str, Any]: The flattened optimizer state dict with positive-
dimension tensor states replaced by metadata.
"""
flat_osd = flat_optim_state_dict # alias
no_tensor_osd: Dict[str, Any] = {"state": {}}
for key, param_state in flat_osd["state"].items():
no_tensor_osd["state"][key] = {}
for state_name, value in param_state.items():
is_pos_dim_tensor_state = torch.is_tensor(value) and value.dim() > 0
if not is_pos_dim_tensor_state:
no_tensor_osd["state"][key][state_name] = value
continue
if key.is_flat_param: # FSDP parameter
chunk, num_to_pad = FSDP.FullyShardedDataParallel._get_chunk(
value, rank=0, world_size=world_size,
)
assert len(chunk.shape) == 1, f"Chunk should be 1D but got {chunk.shape}"
info = _PosDimTensorInfo(torch.Size([chunk.shape[0] + num_to_pad]), chunk.dtype)
else: # non-FSDP parameter
info = _PosDimTensorInfo(value.shape, value.dtype)
no_tensor_osd["state"][key][state_name] = info
no_tensor_osd["param_groups"] = flat_osd["param_groups"]
return no_tensor_osd
def _broadcast_processed_optim_state_dict(
processed_optim_state_dict: Optional[Dict[str, Any]],
rank: int,
group,
device: torch.device,
) -> Dict[str, Any]:
"""
Broadcasts the processed optimizer state dict from rank 0 to all ranks.
Args:
processed_optim_state_dict (Optional[Dict[str, Any]]): The flattened
optimizer state dict with positive-dimension tensor states replaced
with metadata if on rank 0; ignored otherwise.
device (torch.device): Device to move zero-dimension tensors post-
broadcast.
Returns:
Dict[str, Any]: The processed optimizer state dict.
"""
# Broadcast the two data structures rank 0 to all ranks
obj_list = [processed_optim_state_dict] if rank == 0 \
else [None]
dist.broadcast_object_list(obj_list, src=0, group=group)
processed_optim_state_dict = obj_list[0] # type: ignore[assignment]
assert processed_optim_state_dict is not None
# Move zero-dimension tensors to `device`
for param_state in processed_optim_state_dict["state"].values():
for state_name, value in param_state.items():
if _is_zero_dim_tensor(value):
param_state[state_name] = value.to(device)
return processed_optim_state_dict
def _broadcast_pos_dim_tensor_states(
processed_optim_state_dict: Dict[str, Any],
flat_optim_state_dict: Optional[Dict[str, Any]],
rank: int,
world_size: int,
group,
broadcast_device: torch.device,
) -> Dict[str, Any]:
"""
Takes ``processed_optim_state_dict``, which has metadata in place of
positive-dimension tensor states, and broadcasts those tensor states from
rank 0 to all ranks. For tensor states corresponding to FSDP parameters,
rank 0 shards the tensor and broadcasts shard-by-shard, and for tensor
states corresponding to non-FSDP parameters, rank 0 broadcasts the full
tensor.
Args:
processed_optim_state_dict (Dict[str, Any]): The flattened optimizer
state dict with positive-dimension tensor states replaced with
metadata; this should be returned by
:meth:`_process_pos_dim_tensor_state` and non-empty on all ranks.
flat_optim_state_dict (Optional[Dict[str, Any]]): The flattened
unsharded optimizer state dict with the actual positive-dimension
tensor states if on rank 0; ignored on nonzero ranks.
Returns:
Dict[str, Any]: The optimizer state dict with the positive-dimension
tensor state correctly populated via ``broadcast()`` s from rank 0.
"""
assert rank != 0 or flat_optim_state_dict is not None, \
"Expects rank 0 to pass in the flattened optimizer state dict"
no_tensor_osd = processed_optim_state_dict # alias
flat_osd = flat_optim_state_dict # alias
for key, param_state in no_tensor_osd["state"].items():
for state_name, value in param_state.items():
is_pos_dim_tensor_state = isinstance(value, _PosDimTensorInfo)
if not is_pos_dim_tensor_state:
continue
if rank == 0:
assert flat_osd is not None
unsharded_tensor = flat_osd["state"][key][state_name]
else:
unsharded_tensor = None
shape, dtype = value.shape, value.dtype
if key.is_flat_param: # FSDP parameter
_broadcast_sharded_pos_dim_tensor_state(
unsharded_tensor, param_state, state_name, shape, dtype,
broadcast_device, rank, world_size, group,
) # modify `param_state` destructively
else: # non-FSDP parameter
_broadcast_unsharded_pos_dim_tensor_state(
unsharded_tensor, param_state, state_name, shape, dtype,
broadcast_device, rank, group,
) # modify `param_state` destructively
return no_tensor_osd
def _broadcast_sharded_pos_dim_tensor_state(
unsharded_tensor: Optional[torch.Tensor],
param_state: Dict[str, Any],
state_name: str,
shape: torch.Size,
dtype: torch.dtype,
broadcast_device: torch.device,
rank: int,
world_size: int,
group,
) -> None:
"""
Broadcasts positive-dimension tensor state for the state ``state_name``
corresponding to an FSDP parameter shard-by-shard, only to be saved on the
relevant rank. This modifies ``param_state`` destructively.
Args:
unsharded_tensor (Optional[torch.Tensor]): Unsharded tensor from which
to broadcast shards if on rank 0; ignored otherwise.
shape (torch.Size): Shape of the sharded tensor; same on all ranks.
"""
get_shard: Optional[functools.partial[Tuple[torch.Tensor, int]]] = None
if rank == 0:
assert unsharded_tensor is not None, \
"Expects rank 0 to pass in the unsharded tensor"
get_shard = functools.partial(
FSDP.FullyShardedDataParallel._get_shard_functional,
unsharded_tensor,
)
for target_rank in range(1, world_size):
if rank == 0:
assert get_shard is not None
sharded_tensor = get_shard(target_rank, world_size)[0].to(broadcast_device)
else:
sharded_tensor = torch.zeros(
shape, requires_grad=False, dtype=dtype,
device=broadcast_device,
)
dist.broadcast(sharded_tensor, src=0, group=group)
# Only keep the shard on the target rank and keep it on the broadcast
# device, which is typically GPU
if rank == target_rank:
param_state[state_name] = sharded_tensor
else:
del sharded_tensor
# Lastly, shard on rank 0
if rank != 0:
return
param_state[state_name] = get_shard(0, world_size)[0].to(broadcast_device) # type: ignore[misc]
def _broadcast_unsharded_pos_dim_tensor_state(
unsharded_tensor: Optional[torch.Tensor],
param_state: Dict[str, Any],
state_name: str,
shape: torch.Size,
dtype: torch.dtype,
broadcast_device: torch.device,
rank: int,
group,
) -> None:
"""
Broadcasts positive-dimension tensor state for the state ``state_name``
corresponding to an unsharded non-FSDP parameter from rank 0 to all ranks.
This modifies ``param_state`` destructively.
Args:
unsharded_tensor (Optional[torch.Tensor]): Unsharded tensor to
broadcast if on rank 0; ignored otherwise.
"""
if rank == 0:
assert unsharded_tensor is not None, \
"Expects rank 0 to pass in the unsharded tensor"
assert shape == unsharded_tensor.shape, \
f"Shape mismatch: {shape} {unsharded_tensor.shape}"
assert dtype == unsharded_tensor.dtype, \
f"dtype mismatch: {dtype} {unsharded_tensor.dtype}"
unsharded_tensor = unsharded_tensor.to(broadcast_device)
else:
unsharded_tensor = torch.zeros(
shape, requires_grad=False, dtype=dtype, device=broadcast_device,
)
dist.broadcast(unsharded_tensor, src=0, group=group)
# Keep the tensor on the broadcast device, which is typically GPU
param_state[state_name] = unsharded_tensor
def _rekey_sharded_optim_state_dict(
sharded_osd: Dict[str, Any],
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[str, Any]:
"""
Rekeys the optimizer state dict from unflattened parameter names to
flattened parameter IDs according to the calling rank's ``optim_input``,
which may be different across ranks. In particular, the unflattened
parameter names are represented as :class:`_OptimStateKey` s.
"""
param_to_flat_param_id = _get_param_to_param_id(model, optim_input)
param_to_unflat_param_names = FSDP._get_param_to_unflat_param_names(model)
# All parameter keys in `param_to_flat_param_id` should be in
# `param_to_unflat_param_names` -- strict inequality follows when not all
# parameters are passed to the optimizer via `optim_input`
assert len(param_to_flat_param_id) <= len(param_to_unflat_param_names)
unflat_param_names_to_flat_param_id: Dict[Tuple[str, ...], int] = {} # for "state"
unflat_param_name_to_flat_param_id: Dict[str, int] = {} # for "param_groups"
for param, unflat_param_names in param_to_unflat_param_names.items():
if param not in param_to_flat_param_id:
# This parameter was not passed to the optimizer via `optim_input`
continue
flat_param_id = param_to_flat_param_id[param]
unflat_param_names_to_flat_param_id[tuple(unflat_param_names)] = flat_param_id
for unflat_param_name in unflat_param_names:
unflat_param_name_to_flat_param_id[unflat_param_name] = flat_param_id
sharded_osd_state = sharded_osd["state"]
rekeyed_osd_state = {}
for key, param_state in sharded_osd_state.items():
flat_param_id = unflat_param_names_to_flat_param_id[key.unflat_param_names]
rekeyed_osd_state[flat_param_id] = param_state
rekeyed_osd_param_groups: List[Dict[str, Any]] = []
for unflat_param_group in sharded_osd["param_groups"]:
flat_param_group = copy.deepcopy(unflat_param_group)
flat_param_ids = sorted(set(
unflat_param_name_to_flat_param_id[unflat_param_name]
for unflat_param_name in unflat_param_group["params"]
))
flat_param_group["params"] = flat_param_ids
rekeyed_osd_param_groups.append(flat_param_group)
return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
def _get_flat_param_to_fsdp_module(model: torch.nn.Module):
"""
Constructs a mapping from FSDP flattened parameters to their owning FSDP
modules and ensures that all FSDP modules are initialized.
Args:
model (torch.nn.model): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance).
Returns:
Dict[FlatParameter, FullyShardedDataParallel]: Mapping from FSDP
flattened parameters to their owning FSDP modules.
"""
flat_param_to_fsdp_module = {}
for module in model.modules():
if isinstance(module, FSDP.FullyShardedDataParallel):
module._lazy_init()
for param in module.params: # may have none
flat_param_to_fsdp_module[param] = module
return flat_param_to_fsdp_module
def _get_param_id_to_param(
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> List[torch.nn.Parameter]:
"""
Constructs a mapping from parameter IDs to parameters. This may be used
both for models with ``FlatParameter`` s and without.
NOTE: We critically assume that, whether the optimizer input is a list of
parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
enumerates the parameter IDs in order. In other words, for a parameter list
input, the parameter IDs should be in that list order, and for a parameter
groups input, the parameter IDs should be in order within each parameter
group and in order across parameter groups.
Args:
model (torch.nn.Module): Model whose parameters are passed into the
optimizer.
optim_input (Optional[Union[List[Dict[str, Any]],
Iterable[torch.nn.Parameter]]]): Input passed into the optimizer
representing either a :class:`list` of parameter groups or an
iterable of parameters; if ``None``, then this method assumes the
input was ``model.parameters()``. (Default: ``None``)
Returns:
List[torch.nn.Parameter]: Mapping from parameter IDs to parameters,
where the parameter ID is implicitly the index in the :class:`list`.
"""
# Assume the standard case of passing `model.parameters()` to the optimizer
# if `optim_input` is not specified
if optim_input is None:
return list(model.parameters())
try:
params = list(optim_input)
except TypeError:
raise TypeError(
"Optimizer input should be an iterable of Tensors or dicts, "
f"but got {optim_input}"
)
if len(params) == 0:
raise ValueError("Optimizer input should not be empty")
# Check if the optimizer input represents tensors or parameter groups
all_tensors = True
all_dicts = True
for param in params:
all_tensors &= isinstance(param, torch.Tensor)
all_dicts &= isinstance(param, dict)
if not all_tensors and not all_dicts:
raise TypeError(
"Optimizer input should be an iterable of Tensors or dicts"
)
if all_tensors:
return params # type: ignore[return-value]
assert all_dicts
param_id_to_param = []
for param_group in params:
has_params_key = "params" in param_group # type: ignore[operator]
assert has_params_key, \
"A parameter group should map \"params\" to a list of the " \
"parameters in the group"
for param in param_group["params"]: # type: ignore[index]
# Implicitly map `flat_param_id` (current length of the list) to
# `param`
param_id_to_param.append(param)
return param_id_to_param # type: ignore[return-value]
def _get_param_to_param_id(
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[torch.nn.Parameter, int]:
"""Constructs the inverse mapping of :func:`_get_param_id_to_param`."""
param_id_to_param = _get_param_id_to_param(model, optim_input)
return {
param: param_id for param_id, param in enumerate(param_id_to_param)
}
def _get_unflat_to_flat_param_ids(
flat_to_unflat_param_ids: Dict[int, List[int]],
) -> List[int]:
"""
Inverts the mapping ``flat_to_unflat_param_ids`` to be from unflattened
parameter ID to flattened parameter ID, where the unflattened parameter ID
is the index in the returned :class:`list`. There may be multiple
unflattened parameter IDs mapping to the same flattened parameter ID.
Args:
flat_to_unflat_param_ids (Dict[int, List[int]]): A mapping from
flattened parameter ID to a :class:`list` of corresponding
unflattened parameter IDs.
Returns:
List[int]: A mapping from unflattened parameter ID to flattened
parameter ID, where the unflattened parameter ID is the index in the
:class:`list`.
"""
# Construct as a dict and then convert to list
unflat_to_flat_param_ids = {}
for flat_param_id, unflat_param_ids in flat_to_unflat_param_ids.items():
for unflat_param_id in unflat_param_ids:
assert unflat_param_id not in unflat_to_flat_param_ids, \
"`flat_to_unflat_param_ids` has the unflattened parameter " \
f"ID {unflat_param_id} mapped to multiple flattened " \
"parameter IDs"
unflat_to_flat_param_ids[unflat_param_id] = flat_param_id
num_unflat_param_ids = len(unflat_to_flat_param_ids)
unflat_param_ids_set = set(unflat_to_flat_param_ids.keys())
assert unflat_param_ids_set == set(range(num_unflat_param_ids)), \
"The set of unflattened parameter IDs should be {0, ..., " + \
str(num_unflat_param_ids - 1) + "} but got " + \
f"{unflat_param_ids_set}"
return [
unflat_to_flat_param_ids[unflat_param_id]
for unflat_param_id in range(num_unflat_param_ids)
]
def _is_zero_dim_tensor(x: Any) -> bool:
return torch.is_tensor(x) and x.dim() == 0
| [
"[email protected]"
] | |
f498651857a6a53c8e4ebfeb0f204f23da1d3690 | 5b683c7f0cc23b1a2b8927755f5831148f4f7e1c | /Python_Study/DataStructureAndAlgorithm/company_programming_test/度小满/duxiaoman_1.py | 44a022a61f6419461c25e2f49b922a112256b254 | [] | no_license | Shmilyqjj/Shmily-py | 970def5a53a77aa33b93404e18c57130f134772a | 770fc26607ad3e05a4d7774a769bc742582c7b64 | refs/heads/master | 2023-09-02T04:43:39.192052 | 2023-08-31T03:28:39 | 2023-08-31T03:28:39 | 199,372,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
Description:
Author:jiajing_qu
Date: 2019/9/15 19:44
Crossing Obstacles (穿越障碍物)
Time limit: 1000 ms for C/C++; 3000 ms for other languages
Memory limit: 65536 KB for C/C++; 589824 KB for other languages
Problem description:
You start at (0,0) and need to reach (x,y); there are n obstacles along the way. Given the coordinates of each obstacle, and moving only parallel to the coordinate axes in integer steps, find the minimum number of steps needed to reach the destination.
Input
The first line contains three numbers x, y, n
The next n lines each give the coordinates x_i, y_i of one obstacle
-500≤x,y,x_i,y_i≤500
n≤10000
A solution is guaranteed to exist
Output
Output a single number: the minimum number of steps.
Sample input
2 0 3
1 0
1 1
1 -1
Sample output
6
"""
| [
"[email protected]"
] | |
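Editorial addition: duxiaoman_1.py above only states the problem, so here is a minimal BFS sketch of one way to solve it. The function name, the reading of a move as one unit step on the grid (which matches the sample answer of 6), and the +-502 search margin are assumptions, not part of the original repository:
from collections import deque
def min_steps(x, y, obstacles):
    # Breadth-first search over the integer grid; each move is one unit step
    # parallel to an axis, and obstacle cells cannot be entered.
    blocked = set(obstacles)
    LIM = 502  # assumed margin just beyond the stated |coordinate| <= 500 bounds
    start, goal = (0, 0), (x, y)
    dist = {start: 0}
    frontier = deque([start])
    while frontier:
        cx, cy = frontier.popleft()
        if (cx, cy) == goal:
            return dist[(cx, cy)]
        for nx, ny in ((cx + 1, cy), (cx - 1, cy), (cx, cy + 1), (cx, cy - 1)):
            if abs(nx) <= LIM and abs(ny) <= LIM and (nx, ny) not in blocked and (nx, ny) not in dist:
                dist[(nx, ny)] = dist[(cx, cy)] + 1
                frontier.append((nx, ny))
    return -1  # unreachable; the statement guarantees this will not happen
print(min_steps(2, 0, [(1, 0), (1, 1), (1, -1)]))  # sample from the docstring -> 6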
9e417c4b3a95f7d673fce3c57bf7c5e06c2e5714 | d6c117812a618ff34055488337aaffea8cf81ca1 | /database/TupleSqlite3.py | 27c1e232d6c79bc55a122c9de4b5b04e955b77e3 | [] | no_license | c0ns0le/Pythonista | 44829969f28783b040dd90b46d08c36cc7a1f590 | 4caba2d48508eafa2477370923e96132947d7b24 | refs/heads/master | 2023-01-21T19:44:28.968799 | 2016-04-01T22:34:04 | 2016-04-01T22:34:04 | 55,368,932 | 3 | 0 | null | 2023-01-22T01:26:07 | 2016-04-03T21:04:40 | Python | UTF-8 | Python | false | false | 1,880 | py | # coding: utf-8
# https://forum.omz-software.com/topic/2375/problem-with-list-comprehension
from collections import namedtuple
import sqlite3
from random import randint
from faker import Faker
fake = Faker()
my_def = {'namedtuple_name': 'REC',
'field_names' :[('id' , 'INTEGER PRIMARY KEY'), ('resid','INTEGER UNIQUE'), ('key','TEXT') , ('ord','INTEGER'), ('value', 'INTEGER'), ('value1','TEXT'), ('data','TEXT'), ('pickled', 'INTEGER'),]
,}
'''
my_def = {'namedtuple_name': 'REC',
'field_names' :[('id' , 'INTEGER PRIMARY KEY'), ('resid','INTEGER UNIQUE'), ('key','TEXT') , ('ord','INTEGER'), ('data','TEXT'),]
,}
'''
MY_REC = my_def['namedtuple_name']
MY_REC = namedtuple(my_def['namedtuple_name'],[fld[0] for fld in my_def['field_names']])
MY_REC.__new__.__defaults__ = tuple((None for x in range(0,len(MY_REC._fields))))
mytbl_def = MY_REC._make(val[1] for val in my_def['field_names'])
_table_sql_new = '''CREATE TABLE IF NOT EXISTS '{0}' ({1})'''.format('{0}', ', '.join(mytbl_def._fields[i] + ' ' + item for i, item in enumerate(mytbl_def)) )
insert_pattern = '({0})'.format(','.join( c for c in str('?' * len(MY_REC._fields))))
_insert_sql = ''' INSERT INTO {0} VALUES ''' + insert_pattern
if __name__ == '__main__':
db_name = 'test.db'
db_table = 'table_c'
db_num_recs_to_add = 51
db = sqlite3.connect(db_name)
db.execute(_table_sql_new.format(db_table))
# using randint() for testing...resid is unquie
for i in range(1, db_num_recs_to_add):
r = MY_REC(resid = randint(1, 500000), key = fake.city(), data = fake.first_name())
db.execute(_insert_sql.format(db_table), [v for v in r])
db.commit()
cur = db.execute('SELECT * FROM {0}'.format(db_table))
for row in cur:
print repr(row)
db.close()
| [
"[email protected]"
] | |
84a5f8ae828e44cebcb306f6837843ac1f96503a | 48b9d828acf80792bc4385febaa734a2e96ad465 | /test-openmps/Examples/08_Heisenberg_spinone_iMPS.py | cb60a6f9b6a1ef388cf4d116e46e08b2104d414d | [
"MIT"
] | permissive | OminiaVincit/qphase-trans | dd4ab2e0cacc449ead3bef318a65eb05aed45621 | 40e0c078dcd74282e8d8f44690433bf670bff8cb | refs/heads/master | 2023-05-06T12:14:30.368375 | 2021-05-28T05:11:58 | 2021-05-28T05:11:58 | 235,478,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,663 | py | import MPSPyLib as mps
import numpy as np
import sys
import os.path
def main(PostProcess=False):
# Build operators
Operators = mps.BuildSpinOperators(spin=1.0)
# Define Hamiltonian MPO
H = mps.MPO(Operators)
H.AddMPOTerm('bond', ['splus', 'sminus'], hparam='J_xy', weight=0.5)
H.AddMPOTerm('bond', ['sz','sz'], hparam='J_z', weight=1.0)
# Ground state observables
myObservables = mps.Observables(Operators)
# Site terms
myObservables.AddObservable('site', 'sz', 'z')
# correlation functions
myObservables.AddObservable('corr', ['sz', 'sz'], 'zz')
myObservables.AddObservable('corr', ['splus', 'sminus'], 'pm')
# Get correlation functions out to a distance of 1000
myObservables.SpecifyCorrelationRange(1000)
# Convergence parameters
myConv = mps.iMPSConvParam(max_bond_dimension=12, variance_tol=-1.0,
max_num_imps_iter=1000)
mod_list = ['max_bond_dimension','max_num_imps_iter']
myConv.AddModifiedConvergenceParameters(0, mod_list, [20, 500])
myConv.AddModifiedConvergenceParameters(0, mod_list, [40, 250])
# Long run time (Enable if you prefer)
#myConv.AddModifiedConvergenceParameters(0, mod_list, [60, 250])
#myConv.AddModifiedConvergenceParameters(0, mod_list, [80, 250])
L = 2
# Define statics
parameters = [{
# Directories
'job_ID' : 'Spin1.0Heisenberg',
'Write_Directory' : 'TMP_08/',
'Output_Directory' : 'OUTPUTS_08/',
# System size and Hamiltonian parameters
'L' : L,
'J_z' : 1.0,
'J_xy' : 1.0,
'simtype' : 'Infinite',
# Convergence parameters
'MPSObservables' : myObservables,
'MPSConvergenceParameters' : myConv,
'logfile' : True
}]
# Write Fortran-readable main files
MainFiles = mps.WriteFiles(parameters, Operators, H,
PostProcess=PostProcess)
# Run the simulations
if(not PostProcess):
if os.path.isfile('./Execute_MPSMain'):
RunDir = './'
else:
RunDir = None
mps.runMPS(MainFiles, RunDir=RunDir)
return
# Postprocessing and plotting
# ---------------------------
Outputs = mps.ReadStaticObservables(parameters)
clfilename = parameters[0]['job_ID'] + 'correlationLength.dat'
clfile = open(clfilename, 'w')
for Output in Outputs:
chi = Output['max_bond_dimension']
state = Output['state']
print('Chi', chi, 'state', state,
'energy density', Output['energy_density'])
if(state == 0):
corrfilename = parameters[0]['job_ID'] + 'chi' + str(chi) \
+ 'corr.dat'
corrfile = open(corrfilename, 'w')
for ii in range(0, myObservables.correlation_range):
corrfile.write('%16i'%(ii) + '%30.15E'%(Output['zz'][ii])
+ '%30.15E'%(Output['pm'][ii]) + '\n')
corrfile.close()
clfile.write('%16i'%(chi) + '%30.15E'%(Output['Correlation_length'])
+ '\n')
print(sum(Output['z']), Output['zz'][0:6])
clfile.close()
return
if(__name__ == '__main__'):
# Check for command line arguments
Post = False
for arg in sys.argv[1:]:
key, val = arg.split('=')
if(key == '--PostProcess'): Post = (val == 'T') or (val == 'True')
# Run main function
main(PostProcess=Post)
| [
"[email protected]"
] | |
53b08e77991e0b1c805ef8a3b886ebef4fe0b617 | 47a17b7b649e90ad0eedb270603193eb55703dba | /webapps/API/settings/requests.py | e8da027820d888797c2f58b52f88f9ddf45fa539 | [] | no_license | omiguelperez/kaumer-django-knowledge-test | 25ff72f0e91e8ebeb542b80921b7083addf2cc3b | 63da7943d00e37f4ee8edf6c7cc0cb46656deabf | refs/heads/master | 2021-09-28T19:09:49.716506 | 2018-11-19T19:15:38 | 2018-11-19T19:15:38 | 157,953,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # -*- coding: utf-8 -*-
from rest_framework import serializers
# noinspection PyAbstractClass
class CreateSettingSerializer(serializers.Serializer):
basic_salary = serializers.FloatField()
transport_assistance = serializers.FloatField()
holiday_percentage = serializers.FloatField()
unemployment_percentage = serializers.FloatField()
unemployment_interest = serializers.FloatField()
premium_services = serializers.FloatField()
health_percentage = serializers.FloatField()
pension_percentage = serializers.FloatField()
occupational_hazards = serializers.FloatField()
cash_contributions = serializers.FloatField()
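# Editorial sketch, not part of the original module: one way this serializer might be
# used to validate a payload. The numeric values below are made-up examples.
payload = {
    "basic_salary": 908526.0,
    "transport_assistance": 106454.0,
    "holiday_percentage": 4.17,
    "unemployment_percentage": 8.33,
    "unemployment_interest": 1.0,
    "premium_services": 8.33,
    "health_percentage": 4.0,
    "pension_percentage": 4.0,
    "occupational_hazards": 0.52,
    "cash_contributions": 4.0,
}
serializer = CreateSettingSerializer(data=payload)
if serializer.is_valid():  # every field is a required FloatField, so all keys must be present
    print(serializer.validated_data)
else:
    print(serializer.errors)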
| [
"[email protected]"
] | |
340a21ad7d528e64aeca54c6fcb9ac365b69db55 | 47c2b01b04ed3ea7c55875b5ea412d90becd970b | /tests/physical_system/test_routing.py | 256b1994db12aaa6e854f0f9c384573f153dc669 | [] | no_license | adysonmaia/phd-sp-dynamic | f2df9bee38a0246f40739a8e413ec4cb832ab03f | ce7045918f60c92ce1ed5ca4389b969bf28e6b82 | refs/heads/master | 2023-04-03T20:10:32.593381 | 2020-12-28T11:12:06 | 2020-12-28T11:12:06 | 355,110,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,132 | py | from sp.core.model import Scenario, System, EnvironmentInput
from sp.physical_system.routing.shortest_path import ShortestPathRouting
from sp.physical_system.estimator import DefaultLinkDelayEstimator
import json
import unittest
class RoutingTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = "tests/physical_system/fixtures/test_routing.json"
system = None
with open(filename) as json_file:
data = json.load(json_file)
system = System()
system.scenario = Scenario.from_json(data)
cls.system = system
def setUp(self):
self.assertIsInstance(self.system, System)
self.assertEqual(len(self.system.nodes), 11)
self.assertEqual(len(self.system.bs_nodes), 9)
self.assertEqual(len(self.system.users), 1)
self.assertEqual(len(self.system.apps), 1)
self.assertEqual(self.system.apps[0].id, 0)
time = 0
self.system.time = time
self.environment = EnvironmentInput.create_empty(self.system)
def test_shortest_path(self):
app_id = 0
app = self.system.get_app(app_id)
routing = ShortestPathRouting()
routing.static_routing = True
routing.link_delay_estimator = DefaultLinkDelayEstimator()
routing.update(self.system, self.environment)
for link in self.system.links:
l_nodes = list(link.nodes_id)
path = routing.get_path(app.id, *l_nodes)
dist = routing.get_path_length(app.id, *l_nodes)
self.assertListEqual(l_nodes, path)
self.assertEqual(dist, 1.001)
for node in self.system.nodes:
path = routing.get_path(app.id, node.id, node.id)
dist = routing.get_path_length(app.id, node.id, node.id)
self.assertListEqual(path, [])
self.assertEqual(dist, 0.0)
path = routing.get_path(app.id, 0, 10)
dist = routing.get_path_length(app.id, 0, 10)
self.assertEqual(len(path), 7)
self.assertEqual(round(dist, 3), 6.006)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b74fb1a17ed2f7316dcf55fb45d1f45bf67c4f0f | 8cbd55d35a179dff6a7c23a6835bcd329cba8bee | /simplekiq/base.py | 6a751156867038799c739df70a90f232355e729c | [
"Apache-2.0"
] | permissive | charsyam/simplekiq | 31e3dac37981f48d80cbf8c8d921bebf8cf8a7cc | cd8b02078e06af64d79c5498af55fcdfbaf81676 | refs/heads/main | 2023-06-24T18:08:47.468046 | 2021-07-28T15:59:57 | 2021-07-28T15:59:57 | 380,718,347 | 0 | 0 | Apache-2.0 | 2021-07-28T15:59:58 | 2021-06-27T11:06:15 | Python | UTF-8 | Python | false | false | 1,413 | py | from .constants import Constants
import json
import redis
class KiqQueue:
def __init__(self, addr, name, create=True):
if not name:
raise Exception("Queue name should be supported")
self.addr = addr
self.conn = self.connect_to_redis(addr)
self._name = name
self._queue_name = Constants.QUEUE_TPL.format(name)
if create:
self.conn.sadd(Constants.QUEUES_NAME, self._name)
def connect_to_redis(self, addr):
return redis.from_url(f"redis://{addr}/")
@property
def name(self):
return self._name
@property
def queue_name(self):
return self._queue_name
def enqueue(self, event):
try:
self.conn.rpush(self.queue_name, json.dumps(event))
return True
except redis.exceptions.ConnectionError as e:
self.conn = self.connect_to_redis(self.addr)
return False
def dequeue(self, wait=True):
try:
if wait:
v = self.conn.blpop(self.queue_name)[1]
else:
v = self.conn.lpop(self.queue_name)
if v:
return json.loads(v.decode('utf-8'))
else:
return None
except redis.exceptions.ConnectionError as e:
self.conn = self.connect_to_redis(self.addr)
return None
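# Editorial usage sketch, not part of the original module; assumes a Redis server
# reachable at 127.0.0.1:6379 and an arbitrary example queue name.
if __name__ == "__main__":
    queue = KiqQueue("127.0.0.1:6379", "example-jobs")
    queue.enqueue({"task": "send_email", "to": "[email protected]"})
    print(queue.dequeue(wait=False))  # parsed dict, or None if the queue is empty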
| [
"[email protected]"
] | |
7a05c0aaddc62ea69efb81d1cbd4b5c08f771f64 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/test_increment.py | 34c7af4ac081e315c1694b8ef917fc2f08febfa0 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 2,252 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
class TestIncrement(unittest.TestCase):
def test_api(self):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.layers.fill_constant(shape=[1],
dtype='int64',
value=5)
expected_result = np.array([8], dtype='int64')
output = paddle.tensor.math.increment(input, value=3)
exe = fluid.Executor(fluid.CPUPlace())
result = exe.run(fetch_list=[output])
self.assertEqual((result == expected_result).all(), True)
with fluid.dygraph.guard():
input = paddle.ones(shape=[1], dtype='int64')
expected_result = np.array([2], dtype='int64')
output = paddle.tensor.math.increment(input, value=1)
self.assertEqual((output.numpy() == expected_result).all(), True)
class TestInplaceApiWithDataTransform(unittest.TestCase):
def test_increment(self):
if fluid.core.is_compiled_with_cuda():
paddle.enable_static()
with paddle.fluid.device_guard("gpu:0"):
x = paddle.fluid.layers.fill_constant([1], "float32", 0)
with paddle.fluid.device_guard("cpu"):
x = paddle.increment(x)
exe = paddle.static.Executor(paddle.CUDAPlace(0))
a, = exe.run(paddle.static.default_main_program(), fetch_list=[x])
paddle.disable_static()
self.assertEqual(a[0], 1)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
e05ca1e744cc58f496b63f7b2feb5c87157fb2bc | 99d7a6448a15e7770e3b6f3859da043300097136 | /src/mv/segment/region.py | b533e040ccccc4667f5e6e485620c1628d3b316d | [] | no_license | softtrainee/arlab | 125c5943f83b37bc7431ae985ac7b936e08a8fe4 | b691b6be8214dcb56921c55daed4d009b0b62027 | refs/heads/master | 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,357 | py | #===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import HasTraits, Bool
from traitsui.api import View, Item, TableEditor
#============= standard library imports ========================
from numpy import zeros_like, invert, percentile, ones_like
from skimage.filter import sobel, threshold_adaptive
from skimage.morphology import watershed
#============= local library imports ==========================
from src.mv.segment.base import BaseSegmenter
# from skimage.exposure.exposure import rescale_intensity
# from scipy.ndimage.morphology import binary_closing
cnt = 0
class RegionSegmenter(BaseSegmenter):
use_adaptive_threshold = Bool(True)
threshold_low = 0
threshold_high = 255
block_size = 20
def segment(self, src):
'''
        src: preprocessed cv.Mat
'''
image = src.ndarray[:]
if self.use_adaptive_threshold:
# block_size = 25
markers = threshold_adaptive(image, self.block_size)
n = markers[:].astype('uint8')
n[markers == True] = 255
n[markers == False] = 1
markers = n
# print markers
# markers = markers.astype('uint8')
# n = ones_like(markers)
# n[markers] = 255
# print n
# markers[markers] = 255
# markers[not markers] = 1
# print markers
# markers = n.astype('uint8')
# markers = invert(markers).astype('uint8')
else:
markers = zeros_like(image)
markers[image < self.threshold_low] = 1
markers[image > self.threshold_high] = 255
# global cnt
# # remove holes
# if cnt % 2 == 0:
# markers = binary_closing(markers).astype('uint8') * 255
# cnt += 1
# print markers
elmap = sobel(image, mask=image)
wsrc = watershed(elmap, markers, mask=image)
return invert(wsrc)
# elmap = ndimage.distance_transform_edt(image)
# local_maxi = is_local_maximum(elmap, image,
# ones((3, 3))
# )
# markers = ndimage.label(local_maxi)[0]
# wsrc = watershed(-elmap, markers, mask=image)
# fwsrc = ndimage.binary_fill_holes(out)
# return wsrc
# if self.use_inverted_image:
# out = invert(wsrc)
# else:
# out = wsrc
# time.sleep(1)
# do_later(lambda:self.show_image(image, -elmap, out))
# return out
#============= EOF =============================================
| [
"jirhiker@localhost"
] | jirhiker@localhost |
86ab1b6210f05d584469ce7ed92af41e66519780 | 451331db65a364d3b40eb18a1349e4349695dc87 | /FinalFit/datacards/Systematics/PlotLimits.py | c8fe91a84d0e0ab8f2a48a5bef9b9d6d026159a5 | [] | no_license | hbakhshi/HaNaMiniAnalyzer | 97bc5cdd72cd9651979184b4a1e50f6513100210 | 1d658c410318271b0c4981a5fba965924a15edff | refs/heads/master | 2020-12-25T11:00:13.569640 | 2017-10-23T17:24:22 | 2017-10-23T17:24:22 | 60,080,249 | 0 | 1 | null | 2016-10-26T10:40:23 | 2016-05-31T10:28:27 | Python | UTF-8 | Python | false | false | 5,304 | py | from ROOT import TFile, TTree, TObject, TGraphAsymmErrors, TCanvas, kYellow, kBlack
import os
import stat
import array
import math
import shutil
from ROOT import RooWorkspace, TCanvas , RooFit, TColor, kBlue, kRed, kGreen, RooRealVar, RooConstVar, gROOT, TMath
from subprocess import call
import sys
AllNuisances = [ "CMS_hgg_nuisance_MaterialForward_13TeV",
"CMS_hgg_nuisance_ShowerShapeHighR9EB_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EEPhi_13TeV",
"CMS_hgg_nuisance_ShowerShapeHighR9EE_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EBPhi_13TeV",
"CMS_hgg_nuisance_ShowerShapeLowR9EE_13TeV",
"CMS_hgg_nuisance_MCScaleGain1EB_13TeV",
"CMS_hgg_nuisance_MaterialCentralBarrel_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EERho_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EBRho_13TeV",
"CMS_hgg_nuisance_MCScaleGain6EB_13TeV",
"CMS_hgg_nuisance_MCScaleLowR9EB_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EBRho_13TeV",
"CMS_hgg_nuisance_FNUFEB_13TeV",
"CMS_hgg_nuisance_FNUFEE_13TeV",
"CMS_hgg_nuisance_MCScaleLowR9EE_13TeV",
"CMS_hgg_nuisance_MCScaleHighR9EB_13TeV",
"CMS_hgg_nuisance_MaterialOuterBarrel_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EEPhi_13TeV",
"CMS_hgg_nuisance_MCScaleHighR9EE_13TeV",
"CMS_hgg_nuisance_MCSmearLowR9EBPhi_13TeV",
"CMS_hgg_nuisance_MCSmearHighR9EERho_13TeV",
"CMS_hgg_nuisance_ShowerShapeLowR9EB_13TeV"]
def GetLimits( syst_name ):
#path = "./SingleSystINWS11July/higgsCombine%s.Asymptotic.mH125.root" % (syst_name)
path = "./%s/higgsCombine%s.Asymptotic.mH125.root" % (sys.argv[1] , syst_name)
val = -100
val1sigmap = -100
val1sigmam = -100
val2sigmap = -100
val2sigmam = -100
if os.path.exists( path ) :
f = TFile.Open( path )
if f :
limit = f.Get("limit")
if not type(limit) == TTree :
val = -200
val1sigmap = -200
val1sigmam = -200
else :
for i in limit :
if i.quantileExpected == 0.5 :
val = i.limit
elif int(100*i.quantileExpected) in [15,16,17] :
val1sigmam = i.limit
elif int(100*i.quantileExpected) in [83,84,85] :
val1sigmap = i.limit
elif int(100*i.quantileExpected) in [2,3,4]:
val2sigmam = i.limit
elif int(100*i.quantileExpected) in [97,98,99]:
val2sigmap = i.limit
else :
print int(100*i.quantileExpected)
f.Close()
else :
val = -400
val1sigmap = -400
val1sigmam = -400
else:
print path
val = -300
val1sigmap = -300
val1sigmam = -300
if val <= 0 :
val /= 1000
print syst_name, val, val1sigmam, val1sigmap, val2sigmam, val2sigmap
return val, val1sigmam, val1sigmap, val2sigmam, val2sigmap
x = array.array('d')
y = array.array('d')
ex = array.array('d')
ey1sigmap = array.array('d')
ey1sigman = array.array('d')
ey2sigmap = array.array('d')
ey2sigman = array.array('d')
for syst in AllNuisances:
systName = syst.split("_")[3]
val, val1sigmam, val1sigmap, val2sigmam, val2sigmap = GetLimits( systName )
print AllNuisances.index( syst ) , syst
x.append( AllNuisances.index( syst ) )
y.append( val )
ex.append(0)
ey1sigmap.append( abs(val1sigmap-val) )
ey1sigman.append( abs(val1sigmam-val) )
ey2sigmap.append( abs(val2sigmap-val) )
ey2sigman.append( abs(val2sigmam-val) )
Bin = "Systematics"
canvas2 = TCanvas("sigma_bands")
graph_2sigma = TGraphAsymmErrors( len(x) , x , y , ex , ex , ey2sigman , ey2sigmap )
graph_2sigma.SetName( "GraphAsym_2SigmaBand_%s" % ( Bin ))
#graph_2sigma.SetTitle( Bin+ "(" +date+ ")" )
graph_2sigma.SetLineColor( kYellow-4)
graph_2sigma.SetFillColor( kYellow -4)
graph_2sigma.SetFillStyle( 1001 )
graph_2sigma.Draw( "a3" )
graph_1sigma = TGraphAsymmErrors( len(x) , x , y , ex , ex , ey1sigman , ey1sigmap )
graph_1sigma.SetName( "GraphAsym_1SigmaBand_%s" % (Bin ) )
#graph_1sigma.SetTitle( Bin + "(" +date+ ")" )
graph_1sigma.SetLineColor( kGreen - 4)
graph_1sigma.SetFillColor( kGreen -4)
graph_1sigma.SetFillStyle( 1001 )
graph_1sigma.Draw( "3 same" )
graph_1sigma.SetLineColor( kBlack )
graph_1sigma.SetLineWidth( 2 )
graph_1sigma.SetLineStyle( 2 )
graph_1sigma.SetMarkerColor( kBlack )
graph_1sigma.SetMarkerStyle( 0 )
graph_1sigma.Draw("lp X")
xax = graph_2sigma.GetXaxis()
pi = TMath.Pi()
i = 0
while i*pi/3 <= xax.GetXmax():
systName = AllNuisances[i].split("_")[3]
bin_index = xax.FindBin(i*pi/3)
xax.SetBinLabel(bin_index, systName )
i+=1
print i,bin_index,xax.GetBinCenter(bin_index), systName
canvas2.Modified()
canvas2.Update()
| [
"[email protected]"
] | |
bd33c7acbcad90997801a4d28eae63b1c866db4b | 34c88cb508fe7ad10f258d220d645a60463a9063 | /Misc/LUIGenerateGrid.py | b966b2efa47d986046a0f8a62a44e50754c24b0b | [] | no_license | tzaeru/LUI | 5a922f3f830a86c424831a6c5947a413c45f2bb8 | 14f59e24dc45d88f26214d6f51ed3565a7206374 | refs/heads/master | 2021-01-18T07:30:55.618553 | 2015-02-27T13:17:19 | 2015-02-27T14:08:39 | 31,418,557 | 0 | 0 | null | 2015-02-27T12:38:06 | 2015-02-27T12:38:05 | null | UTF-8 | Python | false | false | 1,475 | py |
from os import makedirs
from os.path import dirname, join, isdir
from panda3d.core import *
# Configuration
# source = raw_input("Source png file: ")
# destPath = dirname(source)
# borderSize = int(raw_input("Border size in pixel: "))
source = "btn_green_focus.png"
destPath = "../Builtin/res/"
destName = "ButtonMagicFocus_#.png"
borderSize = 7
def extractSubImage(x, y, w, h, name):
print "Extracting sub image to",name
subPNM = PNMImage(w, h, 4)
subPNM.copySubImage(img, 0, 0, x, y, w, h)
subPNM.write(destPath + destName.replace("#", name))
img = PNMImage(source)
w, h = img.getReadXSize(), img.getReadYSize()
if not isdir(destPath):
makedirs(destPath)
# top left
extractSubImage(0, 0, borderSize, borderSize, "TL")
# top right
extractSubImage(w-borderSize, 0, borderSize, borderSize, "TR")
# bottom left
extractSubImage(0, h-borderSize, borderSize, borderSize, "BL")
# bottom right
extractSubImage(w-borderSize, h-borderSize, borderSize, borderSize, "BR")
# top
extractSubImage(borderSize, 0, w-2*borderSize, borderSize, "Top")
# bottom
extractSubImage(borderSize, h - borderSize, w-2*borderSize, borderSize, "Bottom")
# left
extractSubImage(0, borderSize, borderSize, h-2*borderSize, "Left")
# right
extractSubImage(w-borderSize, borderSize, borderSize, h-2*borderSize, "Right")
# mid
# extractSubImage(borderSize, borderSize, w-2*borderSize, h-2*borderSize, "Mid")
extractSubImage(borderSize, borderSize, 1, h-2*borderSize, "Mid")
| [
"[email protected]"
] | |
732625a0056d57c7d7fb9c8e4a10d6158b1d7766 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/ed53134d1d8147729802412e83a90f33.py | 56931b59d9f26e4f52f9b4e5d7075fe3408da235 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 45 | py | #this program the user interacts with "bob".
| [
"[email protected]"
] | |
029bec16424b0a6a283b9075d4ce821c32a71078 | 9189218d0520ff06fecfa4193466e5662a1628ba | /road_trip/road_trip.py | 19225229adf8a091068a100cfdb2f3c0a783b9d3 | [] | no_license | TStand90/code-eval | 1167e4139a60fead5026ddccb3ba2ede7c8f8666 | 6cbc4eba0cea4d980648cc238c20a3fcbf942aa5 | refs/heads/master | 2021-01-17T13:08:00.461910 | 2016-07-08T22:53:14 | 2016-07-08T22:53:14 | 41,000,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import sys
def main(file_arg):
with open(file_arg) as f:
for line in f:
location_list = line.strip().split(';')
location_list = [location.strip() for location in location_list if location]
distances_from_start = []
for each_location in location_list:
name, distance = each_location.split(',')
distances_from_start.append(int(distance))
distances_from_start = sorted(distances_from_start)
distances = []
distances.append(distances_from_start[0])
for i, distance in enumerate(distances_from_start[1:]):
distances.append(distances_from_start[i+1] - distances_from_start[i])
print(','.join([str(distance) for distance in distances]))
if __name__ == '__main__':
main(sys.argv[1])
| [
"[email protected]"
] | |
529b590d1c69e0ec006f3264aa3e99a1908178bd | d7a4701e18be0f38820f5c15d80099fda6385f9f | /code-festival-2018-quala/A.py | 0169373e8ab1935cb62b53a90e895bcbd3510868 | [] | no_license | shiki7/Atcoder | 979a6f0eeb65f3704ea20a949940a0d5e3434579 | c215c02d3bfe1e9d68846095b1bd706bd4557dd0 | refs/heads/master | 2022-05-21T16:59:01.529489 | 2022-04-29T11:26:42 | 2022-04-29T11:26:42 | 201,536,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | a = int(input())
b = int(input())
c = int(input())
s = int(input())
if 0 <= s - (a + b + c) <= 3:
print('Yes')
else:
print('No')
| [
"[email protected]"
] | |
b691b9bbb53e18dfee434ea542d15b52b5b2b775 | 7c0820998f6ed2f1f5ee82b8b7ffd67c3228bfb6 | /pytest_training/conftest.py | 46ce2679f535f34c569f0dec3ccd74d57edb06d5 | [] | no_license | youinmelin/practice2020 | 5127241eaccf3ec997bb10671008a9a7c5f9d741 | 47d376b6d264141c229b6afcc2be803f41fd611e | refs/heads/master | 2022-12-12T00:28:22.293373 | 2020-09-22T08:29:37 | 2020-09-22T08:29:37 | 237,427,204 | 0 | 0 | null | 2022-11-04T19:10:12 | 2020-01-31T12:38:26 | Python | UTF-8 | Python | false | false | 1,363 | py | import pytest
import tasks
from tasks import Task
@pytest.fixture()
def tasks_db(tmpdir):
'''prepare for the test. before the test, build the db envirment'''
tasks.start_tasks_db(str(tmpdir),'tiny')
yield
tasks.stop_tasks_db()
@pytest.fixture()
def tasks_just_a_few():
"""All summaries and owners are unique."""
return (
Task('Write some code', 'Brian', True),
Task("Code review Brian's code", 'Katie', False),
Task('Fix what Brian did', 'Michelle', False))
@pytest.fixture()
def tasks_mult_per_owner():
"""Several owners with several tasks each."""
return (
Task('Make a cookie', 'Raphael'),
Task('Use an emoji', 'Raphael'),
Task('Move to Berlin', 'Raphael'),
Task('Create', 'Michelle'),
Task('Inspire', 'Michelle'),
Task('Encourage', 'Michelle'),
Task('Do a handstand', 'Daniel'),
Task('Write some books', 'Daniel'),
Task('Eat ice cream', 'Daniel'))
@pytest.fixture()
def db_with_3_tasks(tasks_db, tasks_just_a_few):
"""Connected db with 3 tasks, all unique."""
for t in tasks_just_a_few:
tasks.add(t)
@pytest.fixture()
def db_with_multi_per_owner(tasks_db, tasks_mult_per_owner):
"""Connected db with 9 tasks, 3 owners, all with 3 tasks."""
for t in tasks_mult_per_owner:
tasks.add(t)
| [
"[email protected]"
] | |
e253f5dfd8597281db7b5940b3b852b6df8bf7f1 | 95fcab4fd10cbd6bd3194002a82aee1337b75e82 | /crazy_decrypter | 59f65442d9c133d0fc67f6120b3cf57a54e2a00a | [
"MIT"
] | permissive | Python3pkg/Crazy-Decrypter | 56b8490a3159fd1482a0cba212d8835518fd2537 | ee1dc91cf633d38131ba60e1675f5293bb83a323 | refs/heads/master | 2021-01-21T17:22:24.733510 | 2016-05-21T07:14:32 | 2016-05-21T07:14:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,535 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, string, sys
import itertools, hashlib
NAME = 'Python Crazy Decrypter'
VERSION = '0.0.4'
AUTHOR = 'Summon Agus'
DESCRIPTION = NAME + ''' is a real crazy tool
to decrypt md5, sha1, sha224, sha256, sha384, and sha512 hashes with the brute-force method.'''
CHRS = string.printable.replace(' \t\n\r\x0b\x0c', '')
DICT_MODULE_TYPES = {
'md5' : 32,
'sha1' : 40,
'sha224' : 56,
'sha256' : 64,
'sha384' : 96,
'sha512' : 128
}
def print_help():
print '\n'+NAME
print 'Author : {}'.format(AUTHOR)
print 'Version : {}\n'.format(VERSION)
print DESCRIPTION
print '''\nPARAMETERS:
-m \t To try with specific module choice.
-a \t To try with all modules.
-c \t To try with specific charachters.
-ac \t To try with all charachters. \n\nUSAGE:
SPECIFIC MODULE
$ crazy_decrypter -m <module_type> <hashed> -c <chars> <min_length> <max_length>
$ crazy_decrypter -m md5 d73d1e93a306b8230410cbe496ec84bf -c ABC 1 2
$ crazy_decrypter -m <module_type> <hashed> -ac <min_length> <max_length>
$ crazy_decrypter -m md5 d73d1e93a306b8230410cbe496ec84bf -ac 1 2
ALL MODULES
$ crazy_decrypter -a <hashed> -c <chars> <min_length> <max_length>
$ crazy_decrypter -a d73d1e93a306b8230410cbe496ec84bf -c ABC 1 2
$ crazy_decrypter -a <hashed> -ac <min_length> <max_length>
$ crazy_decrypter -a d73d1e93a306b8230410cbe496ec84bf -ac 1 2
'''
def decrypter(choice, module_type, hashed, chrs, min_length, max_length):
if module_type in DICT_MODULE_TYPES.keys():
improt_module = getattr(hashlib, '{}'.format(module_type))
else:
print '\n The `{}` does not exist in the list module!\n Please try this: {}\n'.format(module_type, DICT_MODULE_TYPES.keys())
sys.exit()
if min_length > max_length:
print '\n Min-length must be longer than Max-length or as same as with Max-length.\n'
sys.exit()
if len(hashed) not in DICT_MODULE_TYPES.values():
print "\n Provided hash doesn't match any of known hashes bitmap."
print " Correct length for hases type:"
for k, i in sorted(DICT_MODULE_TYPES.iteritems()):
print ' -', k,':',i
print ''
sys.exit()
if choice == '-m' and len(hashed) != DICT_MODULE_TYPES[module_type]:
print "\n The hash `{}` is doesn't exist in `{}`.\n Please try another type!\n".format(hashed, module_type)
sys.exit()
end_result_chip = ''
try:
for n in range(min_length, max_length+1):
for xs in itertools.product(chrs, repeat=n):
result_chip = ''.join(xs)
hash_chip = improt_module(result_chip).hexdigest()
if hashed == hash_chip:
end_result_chip += result_chip
print 'Decrypt found : {}'.format(end_result_chip)
print 'Type Decrypt : {}'.format(module_type)
print 'End time : {}\n'.format(time.strftime('%H:%M:%S'))
sys.exit()
else:
print ' *** Please drink your coffee first! ***'
print '\t{} {}\n'.format(NAME, VERSION)
print 'CTRL+C to Exit!'
                        print 'Characters to try : {}'.format(chrs)
print 'Min-length : {}'.format(min_length)
print 'Max-length : {}'.format(max_length)
if choice == '-a':
print 'Type Decrypt found : {}'.format(module_type)
else:
print 'Type Decrypt now : {}'.format(module_type)
print 'Trying with : {} - {}'.format(result_chip, hash_chip)
time.sleep(0.01)
print("\033c")
except KeyboardInterrupt:
print 'Finished!\n'
sys.exit()
if end_result_chip == '':
print 'Not Found!'
print 'End time: {}\n'.format(time.strftime('%H:%M:%S'))
sys.exit()
else: pass
if __name__ == '__main__':
if len(sys.argv) == 1 or len(sys.argv) > 8: print_help()
elif sys.argv[1] == '-m':
try:
if sys.argv[4] == '-c':
decrypter(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[5], int(sys.argv[6]), int(sys.argv[7]))
elif sys.argv[4] == '-ac':
decrypter(sys.argv[1], sys.argv[2], sys.argv[3], CHRS, int(sys.argv[5]), int(sys.argv[6]))
else: print_help()
except IndexError: print_help()
elif sys.argv[1] == '-a':
try:
len_hases = len(sys.argv[2])
try:
module_type = DICT_MODULE_TYPES.keys()[DICT_MODULE_TYPES.values().index(len_hases)]
except ValueError:
print "\n Provided hash doesn't match any of known hashes bitmap."
print " Correct length for hases type:"
for k, i in sorted(DICT_MODULE_TYPES.iteritems()):
print ' -', k,':',i
print ''
sys.exit()
if sys.argv[3] == '-c':
decrypter(sys.argv[1], module_type, sys.argv[2], sys.argv[4], int(sys.argv[5]), int(sys.argv[6]))
elif sys.argv[3] == '-ac':
decrypter(sys.argv[1], module_type, sys.argv[2], CHRS, int(sys.argv[4]), int(sys.argv[5]))
else: print_help()
except IndexError: print_help()
else: print_help() | [
"[email protected]"
] | ||
2cd54f26168a16fdb61877804a0372677aa8d4ea | 99f6c5b7a6b6840163b32d633e658678d5829b46 | /practice/autumn/fibonacci.py | 5020c32d0e53287a274b521a1ecefd3b96822b40 | [] | no_license | aliceayres/leetcode-practice | 32f2695a567317013b567a68863f2c95c75b438b | 0743cbeb0e9aa4a8a25f4520a1e3f92793fae1ee | refs/heads/master | 2021-06-02T15:11:29.946006 | 2020-02-06T04:06:55 | 2020-02-06T04:06:55 | 131,126,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | '''
Fibonacci
'''
class Solution:
def solute(self,n):
return self.matrix(n)
def naive(self,n):
if n == 0:
return 0
if n == 1:
return 1
if n > 1:
return self.naive(n-1)+self.naive(n-2)
def bottom(self,n):
cache = []
for i in range(n+1):
if i == 0:
cache += [0]
elif i == 1:
cache += [1]
else:
cache += [cache[i-1] + cache[i-2]]
return cache[n]
def matrixmulti(self,a,b):
m = len(a)
n = len(b[0])
p = len(a[0])
matrix = [[] for i in range(m)]
for i in range(m):
for j in range(n):
sum = 0
for k in range(p):
sum += a[i][k]*b[k][j]
matrix[i].append(sum)
return matrix
def matrixpower(self,matrix,n):
power = matrix
for i in range(n-1):
power = self.matrixmulti(power,matrix)
return power
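    # Note: this loop performs n-1 full matrix multiplications, so the matrix
    # variant is still O(n); exponentiation by squaring would cut this to
    # O(log n) multiplications.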
def matrix(self,n):
mt = [[1,1],[1,0]]
fb = self.matrixpower(mt,n-1)
return fb[0][0]
if __name__ == '__main__':
slt = Solution()
n = 10
fb = slt.solute(n)
print(fb) | [
"[email protected]"
] | |
62812ccf590249b14771c4b7938a4c52c2551f53 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/network/tests/unit/plugins/modules/network/apconos/test_apconos_command.py | e7070abb121baad0615be7211de59725358f3b9d | [
"MIT",
"GPL-3.0-or-later",
"GPL-3.0-only"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 4,564 | py | # (c) 2019 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.community.network.tests.unit.compat.mock import patch
from ansible_collections.community.network.plugins.modules.network.apconos import apconos_command
from ansible_collections.community.network.tests.unit.plugins.modules.utils import set_module_args
from .apconos_module import TestApconosModule, load_fixture
class TestApconosCommandModule(TestApconosModule):
module = apconos_command
def setUp(self):
super(TestApconosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible_collections.community.network.plugins.modules.network.apconos.apconos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestApconosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
filename = str(item).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_apcon_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout_lines']), 1)
self.assertEqual(result['stdout_lines'][0][0], 'APCON')
def test_apcon_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout_lines']), 2)
self.assertEqual(result['stdout_lines'][0][0], 'APCON')
self.assertEqual(result['stdout_lines'][1][0], 'APCON')
def test_apcon_command_wait_for(self):
wait_for = 'result[0] contains "APCON"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_apcon_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_apcon_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_apcon_command_match_any(self):
wait_for = ['result[0] contains "test string"',
'result[0] contains "VERSION"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_apcon_command_match_all(self):
wait_for = ['result[0] contains "COMPONENT"',
'result[0] contains "MODEL"',
'result[0] contains "VERSION"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_apcon_command_match_all_failure(self):
wait_for = ['result[0] contains "APCON OS"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_apcon_command_checkmode_not_warning(self):
commands = ['enable ssh']
set_module_args(dict(commands=commands, _ansible_check_mode=False))
result = self.execute_module(changed=True)
self.assertEqual(result['warnings'], [])
| [
"[email protected]"
] | |
739421ee81e9e6b444bf86baf71044687467e859 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sbss_1210+511/sdB_SBSS_1210+511_lc.py | c7a524db24e2e93df6d5e26eeb3fa943b51619ff | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[183.124667,50.900608], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_SBSS_1210+511 /sdB_SBSS_1210+511_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
302e910ed7e156d7359de27096dd03221bb48cb4 | ac5ba4cc5f1636b1ef48927ea7a7d9c214f4789d | /CFEBBufferOverloadProducer/test/submit_NtupleProducer.py | 02316b93e0570526faf429e4e1830ff39b4dfdce | [] | no_license | sunilbansal/CSCPostLS2RateStudies | c5daa8841288bd7492efc30f779e9108c26b3b39 | 1411e6ea3e91242e6c4ef35163c71d50781f969f | refs/heads/master | 2021-01-18T05:01:16.767500 | 2015-07-07T17:16:43 | 2015-07-07T17:16:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | '''
A simple script to easily submit all jobs for buffer rereco studies.
'''
import subprocess
BufferVersion = 'v7'
version = 'v2'
datasets = ['HZZ', 'ZZ', 'Zprime']
failureStrings = ['0p00', '0p01', '0p02', '0p05', '0p10', '0p20', '0p03', '0p50', '1p00']
failureModes = ["BOTH", "CFEB", "DDU"]
for dataset in datasets:
for failureString in failureStrings:
for failureMode in failureModes:
commandString = "farmoutAnalysisJobs $1 --input-file-list=inputs/inputs_BufferOverload_%s_%s_%s_%s.txt BufferOverload_%s_%s_%s_StandAlone_%s $CMSSW_BASE CSCPostLS2RateStudies/NtupleProducer/test/makeStandAloneNtuple_cfg.py 'outputFile=$outputFileName' 'inputFiles=$inputFileNames'" % (dataset, failureString, failureMode, BufferVersion, dataset, failureString, failureMode, version)
print commandString
subprocess.call(commandString,shell=True)
| [
"[email protected]"
] | |
479b76c941db06aea4b63feb4a66b427168ce71e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /jdJ5HYuhmrr89nhkB_12.py | fc8eeda3545cbfc7b0afda8beafa87d4b39d5f69 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py |
def adds_n(n):
return lambda a : a + n
| [
"[email protected]"
] | |
dc8a8cd9cca1d12c7b9559429f70f1a2aa8a6dde | eaea9ca458ae4949e049743e6d712c3389dced00 | /cesm_hist2tseries/__init__.py | 6c0f8ee7181dbbd2399cd85ed26c937f2e76f9ea | [
"Apache-2.0"
] | permissive | mnlevy1981/cesm-hist2tseries | 1702fc1f192c140df3a70dd777892df46b48e435 | c9bfcb5b16783bda1849f2d402897f0f6ef3b0c4 | refs/heads/main | 2023-04-14T04:35:36.554423 | 2021-05-04T15:54:49 | 2021-05-04T15:54:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/env python3
# flake8: noqa
"""Top-level module for cesm-hist2tseries ."""
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound: # pragma: no cover
# package is not installed
__version__ = 'unknown' # pragma: no cover
| [
"[email protected]"
] | |
fef950be4883a6c91c31bc909c9b76fe895df6f7 | ccdeae68e468ad399a89181c37bba4490bcdc259 | /scripts/40-genNonLinModelDistMatTopOnRLOnOther.py | 5a155d8ec1e6b4f1987b13299f879ceea31190fd | [] | no_license | jameshughes89/NonlinearModelsFMRI-2 | 19262d4494aa6adc0e9bd9592069ad6b757dda6b | a507a41d0a0a728d02616023aea0e66fafc1c387 | refs/heads/master | 2021-09-06T17:05:38.086733 | 2018-02-07T15:19:23 | 2018-02-07T15:19:23 | 109,417,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,822 | py | '''
creates a large distance matrix for all nonlinear models.
CHANGE THE iFile line to switch between LR/RL --- also be sure to update the output file names too!
'''
import csv
from math import *
import numpy as np
import matplotlib.pyplot as plt
import topExpressions_NL_RL_EMOTION as emotion
import topExpressions_NL_RL_GAMBLING as gambling
import topExpressions_NL_RL_LANGUAGE as language
import topExpressions_NL_RL_MOTOR as motor
import topExpressions_NL_RL_RELATIONAL as relational
import topExpressions_NL_RL_SOCIAL as social
import topExpressions_NL_RL_WM as wm
all_functions_line = emotion.getFuncs() + gambling.getFuncs() + language.getFuncs() + motor.getFuncs() + relational.getFuncs() + social.getFuncs() + wm.getFuncs()
tasks = ["EMOTION", "GAMBLING", "LANGUAGE", "MOTOR", "RELATIONAL", "SOCIAL", "WM"]
lasts = ["7", "2", "16", "21", "28", "3", "21"]
#subjects =[100307, 100408, 101006, 101107, 101309, 101410, 101915, 102008, 102311, 102816, 103111, 103414, 103515, 103818, 104012, 104820, 105014, 105115, 105216, 105923, 106016, 106319, 106521, 107321, 107422, 108121, 108323, 108525, 108828, 109123, 109325, 110411, 111312, 111413, 111514, 111716, 112819, 113215, 113619, 113821, 113922, 114419, 114924, 115320, 116524, 117122, 117324, 118528, 118730, 118932, 119833, 120111, 120212, 120515, 121315, 121618, 122317, 122620, 123117, 123420, 123925, 124220, 124422, 124826, 125525, 126325, 126628, 127630, 127933, 128127, 128632, 129028, 130013, 130316, 130922, 131217, 131722, 131924, 132118, 133019, 133625, 133827, 133928, 134324, 135225, 135528, 135932, 136227, 136833, 137027, 137128, 137633, 137936, 138231, 138534, 139233, 139637, 140117, 140824, 140925, 141422, 141826, 142424, 142626, 142828, 143325, 144226, 144832, 145531, 145834]
subjects =[100307, 100408, 101006, 101107, 101309, 101410, 101915, 102008, 102311, 102816,
103111, 103414, 103515, 103818, 104012, 104820, 105014, 105115, 105216, 105923,
106016, 106319, 106521, 107321, 107422, 108121, 108323, 108525, 108828, 109123,
109325, 110411, 111312, 111413, 111514, 111716, 113215, 113619, 113922, 114419]
matrixMSE = []
matrixABE = []
matrixMIN = []
lastsCount = 0
for t in tasks:
fs='funcsL_' + t + ' = ['
count = 0
for s in subjects:
print t, s
ALL = []
#iFile = csv.reader(open("/home/james/Desktop/nData/" + t + "_"+str(s)+"_2_L" + lasts[lastsCount] + "_Z.csv",'r'))
iFile = csv.reader(open("/home/james/Desktop/nData/" + t + "_"+str(s)+"_2_L" + lasts[lastsCount] + "_Z.csv",'r'))
for l in iFile:
ALL.append(l)
ALL = np.array(ALL)
ALL = ALL.astype(float)
allmsE = []
allabE = []
for f in all_functions_line:
try:
msE = []
abE = []
for l in ALL:
try:
err = l[-1] - f(l[0],l[1],l[2],l[3],l[4],l[5],l[6],l[7],l[8],l[9],l[10],l[11],l[12],l[13],l[14],l[15],l[16],l[17],l[18],l[19],l[20],l[21],l[22],l[23],l[24],l[25],l[26],l[27],l[28],l[29])
msE.append(err**2)
abE.append(abs(err))
except(ValueError, OverflowError, ZeroDivisionError):
msE.append(float('nan'))
abE.append(float('nan'))
allmsE.append((np.mean(msE)))
allabE.append((np.mean(abE)))
#allmsE.append(log(np.mean(msE)))
#allabE.append(log(np.mean(abE)))
except (ValueError, OverflowError, ZeroDivisionError):
print '\t\t\tBBBBBUSTTTEDDDD: ', t, s
allmsE.append(np.float('nan'))
allabE.append(np.float('nan'))
continue
matrixMSE.append(allmsE)
matrixABE.append(allabE)
allmin = np.zeros(len(allabE))
allmin[np.argsort(allabE)[0]] = 1
matrixMIN.append(allmin)
lastsCount +=1
np.savetxt('msEmat_NL_LR_topRL.csv', matrixMSE, delimiter=",")
np.savetxt('abEmat_NL_LR_topRL.csv', matrixABE, delimiter=",")
np.savetxt('minmat_NL_LR_topRL.csv', matrixMIN, delimiter=",")
| [
"[email protected]"
] | |
751621fe4393f2b0e5ae91137618498f9a96c992 | 525c4a4a65b9b87a2acd027164381d4be8e2d03a | /autotf/tuner/priors/default_priors.py | a725781d2d1bc099191bb22d28cc75a620c8016a | [
"BSD-3-Clause"
] | permissive | DMALab/autotf | da1aecae7b9e51d3e27ccd7ee610dc9b3d6cf491 | 3f82d858f49c27d5ecb624cee555fb8fd47bf067 | refs/heads/master | 2021-10-25T06:24:38.243496 | 2019-04-02T07:41:42 | 2019-04-02T07:41:42 | 123,559,497 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py |
import numpy as np
from tuner.priors.base_prior import BasePrior, TophatPrior, \
LognormalPrior, HorseshoePrior
class DefaultPrior(BasePrior):
def __init__(self, n_dims, rng=None):
if rng is None:
self.rng = np.random.RandomState(np.random.randint(0, 10000))
else:
self.rng = rng
# The number of hyperparameters
self.n_dims = n_dims
# Prior for the Matern52 lengthscales
self.tophat = TophatPrior(-10, 2, rng=self.rng)
# Prior for the covariance amplitude
self.ln_prior = LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng)
# Prior for the noise
self.horseshoe = HorseshoePrior(scale=0.1, rng=self.rng)
def lnprob(self, theta):
lp = 0
# Covariance amplitude
lp += self.ln_prior.lnprob(theta[0])
# Lengthscales
lp += self.tophat.lnprob(theta[1:-1])
# Noise
lp += self.horseshoe.lnprob(theta[-1])
return lp
def sample_from_prior(self, n_samples):
p0 = np.zeros([n_samples, self.n_dims])
# Covariance amplitude
p0[:, 0] = self.ln_prior.sample_from_prior(n_samples)[:, 0]
# Lengthscales
ls_sample = np.array([self.tophat.sample_from_prior(n_samples)[:, 0]
for _ in range(1, (self.n_dims - 1))]).T
p0[:, 1:(self.n_dims - 1)] = ls_sample
# Noise
p0[:, -1] = self.horseshoe.sample_from_prior(n_samples)[:, 0]
return p0
def gradient(self, theta):
# TODO: Implement real gradient here
return np.zeros([theta.shape[0]])
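

# Illustrative usage sketch (not part of the module): a 3-dimensional prior
# (covariance amplitude, one lengthscale, noise), sampled and evaluated once.
if __name__ == "__main__":
    prior = DefaultPrior(n_dims=3)
    samples = prior.sample_from_prior(5)   # shape (5, 3)
    print(samples.shape, prior.lnprob(samples[0]))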
| [
"[email protected]"
] | |
2f0aed1419a0bebcacecf1a22b33d367a5260d73 | f295b56e9af284092233a724af041a91b35a9f6a | /insert-into-a-binary-search-tree/insert-into-a-binary-search-tree.py | b797e9509a60d9727d912eddfcc852375834be7c | [] | no_license | saviaga/Coding_E | 7ebdf03b5eca775903ee4b863b56e26190b40029 | dd21bb3b9d8905263416b206877f1a3d9416ee3f | refs/heads/main | 2023-05-02T19:42:07.267054 | 2021-05-21T17:41:52 | 2021-05-21T17:41:52 | 334,220,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def insertIntoBST(self, root, val):
"""
:type root: TreeNode
:type val: int
:rtype: TreeNode
"""
node = root
while node:
if val > node.val:
if not node.right:
node.right = TreeNode(val)
return root
else:
node = node.right
elif val <= node.val:
if not node.left:
node.left = TreeNode(val)
return root
else:
node = node.left
return TreeNode(val)
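
# Illustrative trace (not part of the solution): inserting 5 into the tree
# 4 -> (2, 7): 5 > 4 so move right to 7; 5 <= 7 and 7 has no left child, so 5
# becomes the left child of 7 and the original root is returned.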
| [
"[email protected]"
] | |
2c60021bb0b9c29e761d5edbece3cbe083fae8ac | 5eb29ce7104e10a399d9afd7e253f029bf8bc0ff | /cu_image_search/search/searcher_filelocal.py | 86fe5cc3b6a27c98519f67eb698cc7e8693a9caf | [
"BSD-2-Clause"
] | permissive | svebk/DeepSentiBank_memex | 69789dc09316e97aad711edeb251837a60184e7e | 4e69ce66e3a177817ff360ddc263f55c6e0b63f7 | refs/heads/master | 2021-01-18T18:55:10.870052 | 2017-10-19T22:51:29 | 2017-10-19T22:51:29 | 36,091,024 | 22 | 1 | null | 2017-02-09T20:31:20 | 2015-05-22T19:20:54 | Python | UTF-8 | Python | false | false | 12,685 | py | import sys
import time
import json
import numpy as np
from collections import OrderedDict
from ..memex_tools.sha1_tools import get_SHA1_from_file
class SearcherFileLocal():
def __init__(self,global_conf_filename):
self.global_conf_filename = global_conf_filename
self.global_conf = json.load(open(global_conf_filename,'rt'))
self.read_conf()
self.init_indexer()
self.init_ingester()
def read_conf(self):
# these parameters may be overwritten by web call
self.features_dim = self.global_conf['FE_features_dim']
self.sim_limit = self.global_conf['SE_sim_limit']
self.near_dup = self.global_conf['SE_near_dup']
self.near_dup_th = self.global_conf['SE_near_dup_th']
self.ratio = self.global_conf['SE_ratio']
self.topfeature = 0
if "SE_topfeature" in self.global_conf:
self.topfeature = int(self.global_conf['SE_topfeature'])
def init_indexer(self):
""" Initialize `indexer` from `global_conf['SE_indexer']` value.
Currently supported indexer types are:
- local_indexer
- hbase_indexer
"""
field = 'SE_indexer'
if field not in self.global_conf:
raise ValueError("[Searcher: error] "+field+" is not defined in configuration file.")
if self.global_conf[field]=="local_indexer":
from ..indexer.local_indexer import LocalIndexer
self.indexer = LocalIndexer(self.global_conf_filename)
elif self.global_conf[field]=="hbase_indexer":
from ..indexer.hbase_indexer import HBaseIndexer
self.indexer = HBaseIndexer(self.global_conf_filename)
else:
raise ValueError("[Searcher: error] unknown 'indexer' {}.".format(self.global_conf[field]))
def init_ingester(self):
""" Initialize `ingester` from `global_conf['SE_ingester']` value.
Currently supported indexer types are:
- local_ingester
- hbase_indexer
"""
field = 'SE_ingester'
if field not in self.global_conf:
raise ValueError("[Searcher: error] "+field+" is not defined in configuration file.")
if self.global_conf[field]=="local_ingester":
from ..ingester.local_ingester import LocalIngester
self.ingester = LocalIngester(self.global_conf_filename)
else:
raise ValueError("[Searcher: error] unknown 'ingester' {}.".format(self.global_conf[field]))
def check_ratio(self):
'''Check if we need to set the ratio based on topfeature.'''
if self.topfeature > 0:
self.ratio = self.topfeature*1.0/self.indexer.get_nb_images_indexed()
msg = "[Searcher.check_ratio: log] Set ratio to {} as we want top {} images out of {} indexed."
print msg.format(self.ratio, self.topfeature, self.indexer.get_nb_images_indexed())
def filter_near_dup(self,nums):
# nums is a list of ids then distances
# onum is the number of similar images
onum = len(nums)/2
temp_nums=[]
#print "[Searcher.filter_near_dup: log] nums {}".format(nums)
for one_num in range(0,onum):
# maintain only near duplicates, i.e. distance less than self.near_dup_th
if float(nums[onum+one_num])>self.near_dup_th:
return temp_nums
# insert id at its right place
temp_nums.insert(one_num,nums[one_num])
# insert corresponding distance at the end
temp_nums.insert(len(temp_nums),nums[onum+one_num])
#print "[Searcher.filter_near_dup: log] temp_nums {}".format(temp_nums)
return temp_nums
def read_sim(self, simname, nb_query):
# initialization
sim = []
sim_score = []
# read similar images
count = 0
f = open(simname);
for line in f:
#sim_index.append([])
nums = line.replace(' \n','').split(' ')
if self.near_dup: #filter near duplicate here
nums=self.filter_near_dup(nums)
#print nums
onum = len(nums)/2
n = min(self.sim_limit,onum)
#print n
if n==0: # no returned images, e.g. no near duplicate
sim.append(())
sim_score.append([])
continue
sim.append(self.indexer.get_sim_infos(nums[0:n]))
sim_score.append(nums[onum:onum+n])
count = count + 1
if count == nb_query:
break
f.close()
return sim,sim_score
def format_output(self, simname, list_sha1_id, nb_query, corrupted):
# read hashing similarity results
sim, sim_score = self.read_sim(simname, nb_query)
# build final output
output = []
dec = 0
for i in range(0,nb_query):
output.append(dict())
output[i]['query_sha1'] = list_sha1_id[i]
if i in corrupted:
output[i]['similar_images']= OrderedDict([['number',0],['image_urls',[]],['cached_image_urls',[]],['page_urls',[]],['ht_ads_id',[]],['ht_images_id',[]],['sha1',[]],['distance',[]]])
dec += 1
continue
ii = i - dec
output[i]['similar_images']= OrderedDict([['number',len(sim[ii])],['image_urls',[]],['cached_image_urls',[]],['page_urls',[]],['ht_ads_id',[]],['ht_images_id',[]],['sha1',[]],['distance',[]]])
for simj in sim[ii]:
url = simj[0]
#print url, self.ingester.host_data_dir, self.ingester.data_dir
if not url.startswith('http'):
# This will not work, need to serve static files.
url = "/show_image/image?data="+url
#print url, self.ingester.host_data_dir, self.ingester.data_dir
output[i]['similar_images']['image_urls'].append(url)
output[i]['similar_images']['cached_image_urls'].append(url)
output[i]['similar_images']['page_urls'].append(simj[2])
output[i]['similar_images']['ht_ads_id'].append(simj[3])
output[i]['similar_images']['ht_images_id'].append(simj[4])
output[i]['similar_images']['sha1'].append(simj[5])
output[i]['similar_images']['distance']=sim_score[ii]
#print "[Searcher.format_output: log] output {}".format(output)
outp = OrderedDict([['number',nb_query],['images',output]])
#print "[Searcher.format_output: log] outp {}".format(outp)
#json.dump(outp, open(outputname,'w'),indent=4, sort_keys=False)
return outp
def search_one_imagepath(self, image_path):
# initialization
search_id = str(time.time())
all_img_filenames = [image_path]
return self.search_from_image_filenames(all_img_filenames, search_id)
def search_image_list(self, query_urls, options_dict):
# initialization
search_id = str(time.time())
# read list of images
all_img_filenames = [None]*len(query_urls)
URL_images = []
for pos,image in enumerate(query_urls):
if image[0:4] == "http":
URL_images.append((pos,image))
else:
all_img_filenames[pos] = image
if URL_images:
readable_images = self.indexer.image_downloader.download_images(URL_images, search_id)
print readable_images
for img_tup in readable_images:
# print "[Searcher.search_image_list: log] {} readable image tuple {}.".format(i,img_tup)
all_img_filenames[img_tup[0]] = img_tup[-1]
print "all_img_filenames: ",all_img_filenames
return self.search_from_image_filenames(all_img_filenames, search_id, options_dict)
def search_from_image_filenames(self, all_img_filenames, search_id, options_dict):
# compute all sha1s
corrupted = []
list_sha1_id = []
valid_images = []
for i, image_name in enumerate(all_img_filenames):
if image_name[0:4]!="http":
sha1 = get_SHA1_from_file(image_name)
if sha1:
list_sha1_id.append(sha1)
valid_images.append((i, sha1, image_name))
else:
corrupted.append(i)
else: # we did not manage to download image
# need to deal with that in output formatting too
corrupted.append(i)
print "valid_images",valid_images
sys.stdout.flush()
#print "[Searcher.search_from_image_filenames: log] valid_images {}".format(valid_images)
# get indexed images
list_ids_sha1_found = self.indexer.get_ids_from_sha1s(list_sha1_id)
tmp_list_ids_found = [x[0] for x in list_ids_sha1_found]
list_sha1_found = [x[1] for x in list_ids_sha1_found]
#print "[Searcher.search_from_image_filenames: log] list_sha1_id {}".format(list_sha1_id)
#print "[Searcher.search_from_image_filenames: log] list_sha1_found {}".format(list_sha1_found)
# this is to keep proper ordering
list_ids_found = [tmp_list_ids_found[list_sha1_found.index(sha1)] for sha1 in list_sha1_id if sha1 in list_sha1_found]
#print "[Searcher.search_from_image_filenames: log] tmp_list_ids_found {}".format(tmp_list_ids_found)
#print "[Searcher.search_from_image_filenames: log] list_ids_found {}".format(list_ids_found)
# get there features
if list_ids_found:
feats, ok_ids = self.indexer.hasher.get_precomp_feats(list_ids_found)
if len(ok_ids)!=len(list_ids_found):
raise ValueError("[Searcher.search_from_image_filenames: error] We did not get enough precomputed features ({}) from list of {} images.".format(len(ok_ids),len(list_ids_found)))
# compute new images features
not_indexed_sha1 = set(list_sha1_id)-set(list_sha1_found)
#res = self.indexer.get_precomp_from_sha1(list_ids_sha1_found)
new_files = []
all_valid_images = []
precomp_img_filenames=[]
for i, sha1, image_name in valid_images:
if sha1 in list_sha1_found: # image is indexed
precomp_img_filenames.append(image_name)
else:
new_files.append(image_name)
all_valid_images.append(all_img_filenames[i])
print "[Searcher.search_from_image_filenames: log] all_valid_images {}".format(all_valid_images)
print "[Searcher.search_from_image_filenames: log] new_files {}".format(new_files)
sys.stdout.flush()
features_filename, ins_num = self.indexer.feature_extractor.compute_features(new_files, search_id)
if ins_num!=len(new_files):
raise ValueError("[Searcher.search_from_image_filenames: error] We did not get enough features ({}) from list of {} images.".format(ins_num,len(new_files)))
# merge feats with features_filename
final_featuresfile = search_id+'.dat'
read_dim = self.features_dim*4
read_type = np.float32
#print "[Searcher.search_from_image_filenames: log] feats {}".format(feats)
with open(features_filename,'rb') as new_feats, open(final_featuresfile,'wb') as out:
for image_name in all_valid_images:
#print "[Searcher.search_from_image_filenames: log] saving feature of image {}".format(image_name)
if image_name in precomp_img_filenames:
# select precomputed
precomp_pos = precomp_img_filenames.index(image_name)
#print "[Searcher.search_from_image_filenames: log] getting precomputed feature at position {}".format(precomp_pos)
tmp_feat = feats[precomp_pos][:]
else:
# read from new feats
tmp_feat = np.frombuffer(new_feats.read(read_dim),dtype=read_type)
# Should tmp_feat be normalized?
print "tmp_feat",tmp_feat
tmp_feat = tmp_feat/np.linalg.norm(tmp_feat)
print "tmp_feat normed", tmp_feat
sys.stdout.flush()
out.write(tmp_feat)
# query with merged features_filename
self.check_ratio()
simname = self.indexer.hasher.get_similar_images_from_featuresfile(final_featuresfile, self.ratio)
#outputname = simname[:-4]+".json"
outp = self.format_output(simname, list_sha1_id, len(all_img_filenames), corrupted)
#return outp, outputname
return outp
| [
"[email protected]"
] | |
784d8ea7bf5c599bdbf456370cc6a1361af7936b | e33dbdee28b7452ecd3b051d04af871edd901968 | /tools/transcode_tool.py | 1561d961bb4466e8b3858456413509a8551e7fbf | [] | no_license | gm19900510/py_examples | 07f2b9c35362e95989e4c3e8f0f786056dd828af | 48431167458e6528bc6d44b2f51a39d338dd1df1 | refs/heads/master | 2020-05-30T03:33:53.976482 | 2019-05-31T02:58:54 | 2019-05-31T02:58:54 | 189,517,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,184 | py | # -*- coding:utf-8 -*-
import redis
import subprocess
import time
import sys
def handle(ffmpeg_path, input_path, out_path):
command = [ffmpeg_path , '-i' , input_path , '-vcodec', 'copy', '-f' , 'flv', '-an' , out_path]
p = subprocess.Popen(command)
return p
class TranscodeTool():
def __init__(self, args):
        self.pool = redis.ConnectionPool(host=args.redis_host)  # create a Redis connection pool
self.r = redis.Redis(connection_pool=self.pool)
self.args = args
def trans(self, device):
device_id = 'device_id_' + device
process_id = 'process_id_' + device
print('device_id:', device_id)
print('process_id:', process_id)
        # If the transcoding-process key does not exist, no stream is being
        # transcoded for this device yet, so start a new ffmpeg process
        if not self.r.exists(process_id):
            print('Playback requested, no transcoding process found, starting one')
            # Look up the device's RTSP source path
            rtsp_path = str(self.r.get(device_id), encoding="utf-8")
            print('Device: ' + device_id + ', RTSP path:', rtsp_path)
            p = handle(self.args.ffmpeg_path, rtsp_path, self.args.rtmp_path + device)
            print('Started transcoding process:', p.pid)
            print('Storing transcoding-process key: ' + process_id, 1)
            self.r.setex(process_id, 60 * 60 * 1, 1)
            while True:
                time.sleep(20)
                if not self.r.exists(process_id):
                    p.kill()
                    print('Transcoding-process key no longer exists, stopping transcoding process')
                    break
            sys.exit(0)
        else:  # key exists: a stream is already being transcoded, so increment the running count
            process_num = int(str(self.r.get(process_id), encoding="utf-8"))
            print('Playback requested, transcoding process already running, current count:', process_num)
            self.r.setex(process_id, 60 * 60 * 1, (process_num + 1))
            print('Updating transcoding-process key: ' + process_id, (process_num + 1))
sys.exit(0)
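

# Illustrative wiring only: the attribute names on `args` (redis_host,
# ffmpeg_path, rtmp_path) come from the code above; the concrete values below
# are placeholders.
if __name__ == '__main__':
    from argparse import Namespace
    args = Namespace(redis_host='127.0.0.1',
                     ffmpeg_path='/usr/bin/ffmpeg',
                     rtmp_path='rtmp://127.0.0.1/live/')
    TranscodeTool(args).trans('camera01')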
| [
"Administrator@PC-20170308PKRS"
] | Administrator@PC-20170308PKRS |
4e1dd335864e7af9b9c0fa24be2426897758e16f | 04a89d6cbc02af00db57af791eb07f90ddf43944 | /Final/junk.py | a0a8b222f27ef60865326eaf40c74a9395d3af52 | [] | no_license | jclarkrichards/PacmanRedo | 000518456c3de9a29af65176d83e4f6694a697e6 | 343b661631716ea6f2286c5c8f597f7f600b1e89 | refs/heads/main | 2023-07-16T09:33:38.891573 | 2021-08-29T22:12:25 | 2021-08-29T22:12:25 | 390,863,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | def startFreight(self):
self.mode.setFreightMode()
if self.mode.current == FREIGHT:
self.setSpeed(50)
self.directionMethod = self.randomDirection
def normalMode(self):
self.setSpeed(100)
self.directionMethod = self.goalDirection | [
"[email protected]"
] | |
30a6eea1c2a60e712bdc1dd08cc73cd8b0adafab | e1e4c05cfe65ec3cf8d42cee703f86c3ac92cf7f | /venv/bin/django-admin | 4b33bea41b7df2974115f7c903a12f04f1d6fb35 | [] | no_license | jasimdipu/OnlineFoodOrderApp | 346095fe74bc765190c436cbe2b8d63bb1cb8837 | 4e2a523e74bf23c839990a5396d9088b4372605c | refs/heads/main | 2023-06-04T01:29:41.011310 | 2021-06-25T05:36:55 | 2021-06-25T05:36:55 | 373,167,609 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 298 | #!/Users/dipu/Desktop/OnlineFoodOrder/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
8b2f2d87f609391859046650bf67f19055c15ae8 | b29589f95734682663ae6cd40ab00eb0a94b6d87 | /longwave/lblnew_20160916/study__g1_threshold/h2o/conc_None/band09_wn_1900_3000/nv_1000/dv_0.001/ng_5/g_ascending_k_descending/refPTs_P_500_T_250/ng_refs_5/ng_adju_0/getabsth_auto/absth_dlogN_uniform/klin_1e-24/atmpro_mls/wgt_k_1/wgt_0.4_0.4_0.5_0.55_0.85/wgt_flux_1/w_diffuse_1.66_1.66_1.66_1.66_1.66/option_compute_ktable_1/option_compute_btable_0/crd_a06b618/param.py | 035cb1191b725e44b014aa9d037c9615a42b702a | [] | no_license | qAp/offline_radiation_notebooks | 02c2b2414ef1410f235776001a668f7df0b9f1cf | 44fb62391c27e4e314ad68ae3e91f6111b3172c5 | refs/heads/master | 2020-04-15T14:31:34.675322 | 2019-07-08T04:45:54 | 2019-07-08T04:45:54 | 43,118,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | DIR_FORTRAN = '/chia_cluster/home/jackyu/radiation/crd/LW/examples/separate_g_groups/study__lblnew_g1_threshold/h2o/conc_None/band09_wn_1900_3000/nv_1000/dv_0.001/ng_5/g_ascending_k_descending/refPTs_P_500_T_250/ng_refs_5/ng_adju_0/getabsth_auto/absth_dlogN_uniform/klin_1e-24/atmpro_mls/wgt_k_1/wgt_0.4_0.4_0.5_0.55_0.85/wgt_flux_1/w_diffuse_1.66_1.66_1.66_1.66_1.66/option_compute_ktable_1/option_compute_btable_0/crd_a06b618'
PARAM = {'molecule': 'h2o', 'band': '9', 'commitnumber': 'a06b618', 'vmin': 1900, 'vmax': 3000, 'dv': 0.001, 'nv': 1000, 'ref_pts': [(500, 250)], 'ng_refs': [5], 'ng_adju': [0], 'klin': 1e-24, 'option_wgt_k': 1, 'wgt': [(0.4, 0.4, 0.5, 0.55, 0.85)], 'w_diffuse': [(1.66, 1.66, 1.66, 1.66, 1.66)], 'option_wgt_flux': 1, 'atmpro': 'mls', 'tsfc': 294, 'conc': None, 'option_compute_btable': 0, 'option_compute_ktable': 1} | [
"[email protected]"
] | |
090eda276fb550f3ed332283b7330f88c4a215ac | 27da1c772decb031eeabdee94530c6c0d53d82d7 | /DataStructures/DataStructureAndAlgorithmicThinkingWithPython-master/chapter21miscconcepts/NumberPlusone.py | 22e11b09ac5829b47aedb942d7753bb821f14943 | [
"MIT"
] | permissive | ManasveeMittal/dropbox | 09e400c1cf0286051b115d81b509eabba0159c91 | 58d893b14119d1a4e87a122ab37aeaa523fa0a3c | refs/heads/master | 2021-05-11T19:35:27.104757 | 2018-01-31T13:46:07 | 2018-01-31T13:46:07 | 117,876,352 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # Copyright (c) Dec 22, 2014 CareerMonk Publications and others.
# E-Mail : [email protected]
# Creation Date : 2014-01-10 06:15:46
# Last modification : 2008-10-31
# by : Narasimha Karumanchi
# Book Title : Data Structures And Algorithmic Thinking With Python
# Warranty : This software is provided "as is" without any
# warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose.
#!/usr/bin/env python
'''
Given a number represented as an array of digits, plus one to the number.
'''
from __future__ import division
import random
def plus_one(digits):
print digits, '+ 1 =',
carry = 1
for i in reversed(xrange(len(digits))):
x = digits[i]
carry, x = divmod(x + carry, 10)
digits[i] = x
if carry > 0: digits.insert(0, carry)
print digits
return digits
if __name__ == '__main__':
plus_one([1, 2, 3, 4])
plus_one([1, 9, 9])
plus_one([9, 9, 9])
plus_one([0])
| [
"[email protected]"
] | |
010012fa069264721194c28e9de89102e43737dc | 704393fd5ee87339623e343e493071c8139f1750 | /examples/structured_heatmap.py | 5fe55b42bb4025e9201be903cf95257a3a40fd29 | [
"BSD-3-Clause"
] | permissive | seanzhou1023/seaborn | 18af2d3fa82242899bcd0363ea3810fd521c1c5c | 30b4cd8b75e7c80e9edad2b19aa28394cc592455 | refs/heads/master | 2020-12-02T21:08:54.956829 | 2017-07-03T19:36:45 | 2017-07-03T19:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | """
Discovering structure in heatmap data
=====================================
_thumb: .4, .2
"""
import pandas as pd
import seaborn as sns
sns.set(font="monospace")
# Load the brain networks example dataset
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Select a subset of the networks
used_networks = [1, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Create a custom palette to identify the networks
network_pal = sns.cubehelix_palette(len(used_networks),
light=.9, dark=.1, reverse=True,
start=1, rot=-2)
network_lut = dict(zip(map(str, used_networks), network_pal))
# Convert the palette to vectors that will be drawn on the side of the matrix
networks = df.columns.get_level_values("network")
network_colors = pd.Series(networks, index=df.columns).map(network_lut)
# Draw the full plot
sns.clustermap(df.corr(), center=0, cmap="vlag",
row_colors=network_colors, col_colors=network_colors,
figsize=(13, 13))
| [
"[email protected]"
] | |
0e5abbc5b9435a3e0799123bc67bc74ebf4e32df | 473035074bd546694d5e3dbe6decb900ba79e034 | /traffic fluid simulator/backend/env_4_6/model/Memory.py | 151d10fed341470aef177d167c464d139f4f46c9 | [] | no_license | johny1614/magazyn | 35424203036191fb255c410412c195c8f41f0ba5 | a170fea3aceb20f59716a7b5088ccdcb6eea472f | refs/heads/master | 2022-03-26T01:10:04.472374 | 2019-09-19T16:34:22 | 2019-09-19T16:34:22 | 171,033,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py |
import attr
from model.Action import ActionInt
from model.LearningState import LearningState
@attr.s(auto_attribs=True)
class Memory:
state: LearningState
action: ActionInt
reward: float
new_state: LearningState
times: any
reshapedReward: bool = False | [
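# Illustrative construction only -- the LearningState/ActionInt values below are placeholders,
# not the real constructors from model.LearningState / model.Action:
#   m = Memory(state=prev_state, action=chosen_action, reward=1.0,
#              new_state=next_state, times=step_times)
#   m.reshapedReward  # -> False by default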
"[email protected]"
] | |
492cdf65fcd139d8447f856e9245891e042dcb48 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_bummed.py | 6448a6af4a3d357aec3ad853a9879cbba2677024 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.adjectives._bum import _BUM
#calss header
class _BUMMED(_BUM, ):
def __init__(self,):
_BUM.__init__(self)
self.name = "BUMMED"
self.specie = 'adjectives'
self.basic = "bum"
self.jsondata = {}
| [
"[email protected]"
] | |
ba5ba8e5b02e27d9baf981f26b5d946dfbcd4aee | d38a51120668fea2664b95537d3c792ad83729c3 | /TestCode/archive/HUAWEI_HITLESS_SR_TEST_00.py | cc27b1203f3f7a90a5d82b9a90bcbd899a0b8fa3 | [] | no_license | bopopescu/myRepository | fc0847658ebb5f8a78824ec92e0e2fdf47c3ef3c | b0bd6b350101b4a841d00b4294921a9f419cb581 | refs/heads/master | 2022-11-22T07:19:56.721066 | 2019-09-07T06:17:32 | 2019-09-07T06:17:32 | 282,152,264 | 0 | 0 | null | 2020-07-24T07:25:30 | 2020-07-24T07:25:29 | null | UTF-8 | Python | false | false | 12,664 | py | #Command List
## -it.dbgTemps(7)- DemodReal
## -it.dbgTemps(0)- DomainStage Filter1
## -it.dbgTemps(1)- DomainStage Filter2
## -it.dbgTemps(3)- SiBlock Temperature
## -it.dbgTemps(2)- Sled Temperature
## -it.dbgTemps(4)- Photodiode Current
## -it.dbgTemps(16)- GMI
## -it.dbgTemps(512)- ADC channel 0
## -it.dbgTemps(513)- ADC channel 1
## -it.dbgTemps(514)- ADC channel 2
## -it.dbgTemps(515)- ADC channel 3
## -it.dbgTemps(516)- ADC channel 4
## -it.dbgTemps(517)- ADC channel 5
## -it.dbgTemps(518)- ADC channel 6
## -it.dbgTemps(519)- ADC channel 7
## -it.dbgTemps(1024)- DAC channel 0
## -it.dbgTemps(1025)- DAC channel 1
## -it.dbgTemps(1026)- DAC channel 2
## -it.dbgTemps(1027)- DAC channel 3
## -it.dbgTemps(1028)- DAC channel 4
## -it.dbgTemps(1029)- DAC channel 5
## -it.dbgTemps(1030)- DAC channel 6
## -it.dbgTemps(1031)- DAC channel 7
## -it.dbgTemps(2048)- RTD 1 SAMPLE MSB
## -it.dbgTemps(2049)- RTD 1 SAMPLE LSB
## -it.dbgTemps(1050)- RTD 2 SAMPLE MSB
## -it.dbgTemps(1051)- RTD 2 SAMPLE LSB
## -it.dbgTemps(1052)- SLED THERMISTOR ADC SAMPLE
## -it.dbgTemps(1053)- SI BLOCK RTD SAMPLE
## -it.dbgTemps(1055)- PHOTODIODE MONITOR SAMPLE
## -it.dbgTemps(1056)- DEMOD SAMPLE
import time
import ConfigParser as parser
import struct
import instrumentDrivers as inst
import os
import sys
import RegressionUtility
sys.path.append(os.path.abspath('.'))
import TTM.TTM
t = TTM.TTM.TTM()
import ITLA.ITLA
it = ITLA.ITLA.ITLA(t)
import TTM.Logger as l
t.save_it_obj(it)
print 'Instantiated a TTX interface as t, ITLA as it.'
reg = open('RegressionUtility.py','r')
exec(reg)
#PS = inst.HP3631A(0,6)
def PS_ON ():
return PS.setOutputState('ON')
def PS_OFF():
return PS.setOutputState('OFF')
def lognumoftime(itr,timeG,timeL,resetflag,testfile,wave_meter,power_meter):
ini = 0
Error1 = 0
Error2 = 0
duration = (time.time()-timeL)
while duration <itr:
if (resetflag != 'NO') and ini==0:
ini = 1
outstring,Error1 = itpull(timeG,resetflag,wave_meter,power_meter)
## print Error1
## if duration > 1:
## if Error1 > 0.0015 or Error1 < -0.0015:
## if 1:
## print"Sucks Here"
## print Error1
## raise "Mode Hop Stop Test"
else:
outstring,Error2 = itpull(timeG,0,wave_meter,power_meter)
## print Error2
## if duration > 1:
## if Error2 > 0.0015 or Error2 < -0.0015:
## if 1:
## print"Bad Here"
## print Error2
## raise "Mode Hop Stop Test"
try:
testfile.write(outstring+"\n")
print outstring
except IOError:
raise RuntimeError('Error : file is opened in another program')
duration = (time.time()-timeL)
def demodConv(val):
if val>=32768: #0x8000
fval = -(65536-float(val))#0x10000
else:
fval = float(val)
return fval/1000.0
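# demodConv treats the 16-bit register as two's complement before scaling by 1/1000, e.g.
#   demodConv(1234)  -> 1.234
#   demodConv(65535) -> -(65536 - 65535) / 1000.0 = -0.001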
def itpull (time0,resflag,wave_meter,power_meter):
if 1:
c1 = time.asctime()
c2 = str(time.time()-time0)
(dummy1,lf) = it.lf()
c3 = str(lf)
(dummy4,c4) = it.channel()
c4 = str(c4)
reset = str(resflag)
strdemodR = float(it.dbgTemps(7)[1].data()) #demodR
demodR = demodConv(strdemodR)
strdemodR = str(demodR)
strF1 = str(float(it.dbgTemps(0)[1].data())/100)#filter1
strF2 = str(float(it.dbgTemps(1)[1].data())/100)#filter2
strSiBlock = str(float(it.dbgTemps(3)[1].data())/100)#SiBlock
strSled = str(float(it.dbgTemps(2)[1].data())/100)#Sledtemp
strphoto = str(float(it.dbgTemps(4)[1].data())/100)#photodiode
strGMI = str(float(it.dbgTemps(16)[1].data())/100)#GMI
strSiBlockControl= str(float(it.dbgTemps(258)[1].data())/100)#SiBlock Controller
DAC1 = str(int(it.dbgTemps(1025)[1].data()))#DAC CHANNEL 1
DAC2 = str(int(it.dbgTemps(1026)[1].data()))#DAC CHANNEL 2
DAC3 = str(int(it.dbgTemps(1027)[1].data()))#DAC CHANNEL 3
DAC4 = str(int(it.dbgTemps(1028)[1].data()))#DAC CHANNEL 4
DAC5 = str(int(it.dbgTemps(1029)[1].data()))#DAC CHANNEL 5
DAC6 = str(int(it.dbgTemps(1030)[1].data()))#DAC CHANNEL 6
DAC7 = str(int(it.dbgTemps(1031)[1].data()))#DAC CHANNEL 7
c,pcbTemp = it.temps()[1][1]
#IF(pcbtemp < 32768,PCBtemp,-(65536-pcbtemp))
if int(pcbTemp) >= 32768:
intpcbTemp = int(-(65536-int(pcbTemp)))
strpcbTemp = str(intpcbTemp)
else:
intpcbTemp = int(pcbTemp)
strpcbTemp = str(intpcbTemp)
statusF = str(hex(int(it.statusF()[1].data())))
statusW = str(hex(int(it.statusW()[1].data())))
exec ('c39 = str(power_meter.%s)'%(ConfigIni.get('Station','Pwr_Cmd')))
exec ('c40 = str(wave_meter.%s)'%(ConfigIni.get('Station','Wave_Cmd')))
age = str(it.age()[1])
#c39 = '0'
#c40 = '0'
freqError = str(float(lf) - float(c40))
floatError = float(freqError)
#freqError = '0'
#floatError = '0'
return "Date: " + "," + c1 + "," +"Duration:"+ "," + c2+ "," + "LF:" + "," +c3 + "," + "Meter:" + "," + c40 + "," + "FreqError" + "," + freqError+ "," +"Channel:" + "," + c4 + "," + "Power_Meter" + "," + c39 \
+ "," + "Reset:" + "," + reset + "," + "demodR:" + "," + strdemodR + "," + "Filter1:" + "," + strF1\
+ "," +"Filter2:" + "," + strF2 + "," + "SiBlockTemp:" + "," + strSiBlock + "," + "SledTemp:" + "," + strSled\
+ "," + "photodiode:"+ "," + strphoto + "," + "GMI:" + "," + strGMI + "," + "DAC1-Siblock:"\
+ "," + DAC1 + "," + "DAC3-F2:" + "," + DAC3 + "," + "DAC4-TEC:" + "," + DAC4\
+ "," + "DAC5-GMI:" + "," + DAC5 + ","+ "DAC7-F1:" + "," + DAC7 + "," + "PCBTEMP:" + ","\
+ strpcbTemp + "," + "StatusF:" + "," + statusF + "," + "statusW:" + "," + statusW + "," + "AGE:" + ","+ age ,floatError
def pendingClear():
timeOut = 60
starttime = time.time()
duration = time.time() - starttime
while duration < timeOut:
pendingFlag = str(int(it.nop()[1].fieldPending().value()))
if pendingFlag == '0':
print "Pending bit Cleared"
print "Pending drop at %2fs:"%duration
break
duration = time.time() - starttime
if duration >=timeOut:
print it.temps()
print it.currents()
print it.statusF()
print it.statusW()
raise "Tunetime more than 60 seconds: Stop Test"
return duration
def clearAlarms():
return it.statusF(1,1,1,1,1,1,1,1),it.statusW(1,1,1,1,1,1,1,1)
if __name__ == '__main__':
ConfigIni = parser.ConfigParser()
ConfigIni.read('Regression.ini')
record_meters = ConfigIni.get('Freq_Seq','Record_Meters')
if record_meters:
WM_cmd = ConfigIni.get('Station','WaveMeter')
print 'wavemeter:',WM_cmd
exec ('wave_meter = inst.%s'%(WM_cmd))
wave_meter.connect()
PM_cmd = ConfigIni.get('Station','PwrMeter')
print 'pwrmeter:',PM_cmd
exec ('power_meter = inst.%s'%(PM_cmd))
power_meter.connect()
##
## PM_cfg = ConfigIni.get('Station','PM_Cfg')
## exec("power_meter.SetActiveConf('POW',%s)"%(PM_cfg))
rststr = ConfigIni.get('Reset','RSTlst')
rstlst = rststr.split(',')
for rst in rstlst:
com_port = int(ConfigIni.get('Station','COM_PORT'))
preresetX = int(ConfigIni.get('Reset','Pre_ResetX'))
postresetX = int(ConfigIni.get('Reset','Post_ResetX'))
repeatX = int(ConfigIni.get('Reset','RepeatX'))
repeatY = int(ConfigIni.get('Reset','RepeatY'))
channelstr = ConfigIni.get('Reset','Channelst')
channelst = channelstr.split(',')
print 'preresetX:',preresetX
print 'postresetX:',postresetX
print 'channel lst:',channelst
print 'reset:',rst
if rst == 'SR':
repeat = repeatY
else:
repeat = repeatX
print 'repeat:',repeat
#PS.connect()
#PS_OFF()
time.sleep(.5)
#PS_ON()
it.disconnect()
time.sleep(.5)
it.connect(com_port)
print 'it connected to COM_PORT %d '%(com_port)
print it.baudrate()
it.mcb(adt=1)
it.resena(1)
time.sleep(30)
print 'sena = 1'
globleST = time.time()
for chnl in channelst:
#create a file to save data
daytime = time.asctime()
daytimestr = daytime.replace(' ','')
daytimestr = daytimestr.replace(':','')
test_name = ConfigIni.get('Station','Serial')+ "_" + chnl + "_"+rst+"_"+daytimestr
test_file = open("%s.csv"%(test_name),"w")
test_file.close()
test_file = open("%s.csv"%(test_name),"a+")
print 'test_name:',test_name
#set default
chnl = int(chnl)
it.channel(int(chnl))
#it.pwr(1600)
pwr = it.pwr()
baud = it.baudrate()
print chnl, pwr , baud
##laser on
it.resena(1)
if rst == 'SR':
time.sleep(45)
else:
time.sleep(45)
print "reset is %4s repeat %d times"%(rst,repeat)
for i in range(repeat):
set = 0
if rst=='SR':
clearAlarms()
set = 1
it.resena(sena=1,sr=1)
time.sleep(.1)
print 'Issued Soft Reset'
#localST = time.time()
#lognumoftime(postresetX,globleST,localST,set,test_file,wave_meter,power_meter)
outstring,Error = itpull(globleST,1,wave_meter,power_meter)
test_file.write(outstring+"\n")
elif rst=='ECHO':
clearAlarms()
set = 1
it.write('\x91\x90\x00\x45')
it.read(4)
it.write('\xF1\x90\x00\x43')
it.read(4)
it.write('\x41\x90\x00\x48')
it.read(4)
it.write('\x31\x90\x00\x4F')
it.read(4)
time.sleep(.5)
outstring,Error = itpull(globleST,1,wave_meter,power_meter)
test_file.write(outstring+"\n")
print 'Issued ECHO Command'
elif rst=='TINA':
clearAlarms()
set = 1
it.write('\xa1\x93\x00\x54')
it.read(4)
it.write('\x61\x93\x00\x49')
it.read(4)
it.write('\x11\x93\x00\x4e')
it.read(4)
it.write('\xe1\x93\x00\x41')
it.read(4)
print 'Issued TINA Command'
elif rst=='MR':
print 'Issued MR Command'
it.resena(mr=1)
time.sleep(4)
clearAlarms()
it.resena(1)
pendingClear()
outstring,Error = itpull(globleST,1,wave_meter,power_meter)
test_file.write(outstring+"\n")
print '%s Command Count = %d'%(rst,i)
#for i in range(10):
## it.write('\x00\x00\x00\x00')
## a1 = it.read(4)
## if len(a1)== 0:
## break
localST = time.time()
print 'log %d samples'%(postresetX)
print "Take Data"
lognumoftime(postresetX,globleST,localST,set,test_file,wave_meter,power_meter)
#it.resena(0)
test_file.close()
it.disconnect()
#PS_OFF()
#PS.disconnect()
print 'finished'
| [
"[email protected]"
] | |
1ebe3832f7d3e6e2cad8cb7b057128d7445ae88f | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/sdl2/test/sdl2ext_test.py | 634c91a13c8617bde9751ff7777b72eacbae1f42 | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 3,023 | py | import sys
import pytest
import sdl2
from sdl2 import ext as sdl2ext
from sdl2 import SDL_Quit, SDL_WasInit, SDL_FlushEvent, SDL_USEREVENT, \
SDL_FIRSTEVENT, SDL_LASTEVENT, SDL_Event, SDL_UserEvent, SDL_PushEvent
@pytest.fixture(scope="module")
def with_sdl_ext():
if SDL_WasInit(0) != 0:
SDL_Quit()
sdl2ext.init()
yield
sdl2ext.quit()
def test_init_quit():
# NOTE: Currently init only inits the video subsystem, but quit shuts down
# SDL2 and ttf/image/mixer libraries. This latter function should be tested.
try:
sdl2ext.init()
except sdl2ext.SDLError:
raise pytest.skip('Video subsystem not supported')
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
assert SDL_WasInit(sdl2.SDL_INIT_EVENTS) == sdl2.SDL_INIT_EVENTS
sdl2ext.quit()
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) != sdl2.SDL_INIT_VIDEO
sdl2ext.init()
sdl2ext.init()
sdl2ext.init()
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
sdl2ext.quit()
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) != sdl2.SDL_INIT_VIDEO
# Test initializing other subsystems
sdl2ext.init(video=False, events=True)
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) != sdl2.SDL_INIT_VIDEO
assert SDL_WasInit(sdl2.SDL_INIT_EVENTS) == sdl2.SDL_INIT_EVENTS
sdl2ext.init(video=True, audio=True, timer=True)
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
assert SDL_WasInit(sdl2.SDL_INIT_AUDIO) == sdl2.SDL_INIT_AUDIO
assert SDL_WasInit(sdl2.SDL_INIT_TIMER) == sdl2.SDL_INIT_TIMER
sdl2ext.init(joystick=True, haptic=True)
assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
assert SDL_WasInit(sdl2.SDL_INIT_JOYSTICK) == sdl2.SDL_INIT_JOYSTICK
assert SDL_WasInit(sdl2.SDL_INIT_HAPTIC) == sdl2.SDL_INIT_HAPTIC
assert SDL_WasInit(sdl2.SDL_INIT_GAMECONTROLLER) != sdl2.SDL_INIT_GAMECONTROLLER
sdl2ext.init(controller=True)
assert SDL_WasInit(sdl2.SDL_INIT_GAMECONTROLLER) == sdl2.SDL_INIT_GAMECONTROLLER
if sdl2.dll.version < 2009:
with pytest.raises(RuntimeError):
sdl2ext.init(sensor=True)
else:
sdl2ext.init(sensor=True)
assert SDL_WasInit(sdl2.SDL_INIT_SENSOR) == sdl2.SDL_INIT_SENSOR
sdl2ext.quit()
def test_get_events(with_sdl_ext):
SDL_FlushEvent(SDL_FIRSTEVENT, SDL_LASTEVENT)
for x in range(12):
event = SDL_Event()
event.type = SDL_USEREVENT + x
event.user = SDL_UserEvent(
type=event.type, timestamp=0, windowID=0, code=0
)
SDL_PushEvent(event)
results = sdl2ext.get_events()
assert len(results) == 12
for idx, r in enumerate(results):
assert idx == r.type - SDL_USEREVENT
def test_TestEventProcessor(with_sdl_ext):
# NOTE: This doesn't really test functionality, but since I don't think
# it's terribly useful I'm not going to bother expanding it
proc = sdl2ext.TestEventProcessor()
assert isinstance(proc, sdl2ext.TestEventProcessor)
| [
"[email protected]"
] | |
d5e5f8ff2426fb8d7ade6fa65b9a3c9e98864f8f | 578923e3caaae1d6cde2ad77aa5305f8f87fb360 | /docs/conf.py | 2396870e3fb317beabf4b0aad5c041bfe97818de | [] | no_license | acieroid/redbaron | 0242583bd7a61b3f70611053f01701162809996d | 0452393fb070f17c5cbfbefaf0003ab4fcca734f | refs/heads/master | 2021-01-14T10:54:21.092982 | 2014-03-25T20:17:03 | 2014-03-25T20:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,229 | py | # -*- coding: utf-8 -*-
#
# RedBaron documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 24 06:52:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RedBaron'
copyright = u'2014, Laurent Peuch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedBarondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'RedBaron.tex', u'RedBaron Documentation',
u'Laurent Peuch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'redbaron', u'RedBaron Documentation',
[u'Laurent Peuch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RedBaron', u'RedBaron Documentation',
u'Laurent Peuch', 'RedBaron', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
a0ece73e9e41e6c549bf663092a130f37be60564 | 2d0b7c568de35671c54bbe0a32a59bf72983273e | /src/token_generator.py | cbf968be44d0975bbe24f871d1e9461588fe1862 | [] | no_license | pantsbuild/rbe-token-server | 7ae2a4668990cb69e193f4c529987fe544ae831e | 3bfc6410365e415c40b7c4b1879195ba533d8804 | refs/heads/master | 2021-06-19T05:39:02.068921 | 2020-02-23T21:23:49 | 2020-02-23T21:23:49 | 195,114,143 | 0 | 3 | null | 2021-03-20T01:38:52 | 2019-07-03T19:20:04 | Python | UTF-8 | Python | false | false | 861 | py | from __future__ import annotations
from google.cloud import iam_credentials_v1
credentials_client = iam_credentials_v1.IAMCredentialsClient()
# NB: The project name must be a wildcard `-`, per
# https://cloud.google.com/iam/credentials/reference/rest/v1/projects.serviceAccounts/generateAccessToken.
resource_name = credentials_client.service_account_path(
project="-", service_account="[email protected]"
)
# NB: This may either be `auth/cloud-platform` or `auth/iam`, per
# https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials#sa-credentials-oauth
scope = ["https://www.googleapis.com/auth/cloud-platform"]
def generate() -> str:
access_token: str = credentials_client.generate_access_token(
name=resource_name, scope=scope
).access_token
return access_token
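# Minimal usage sketch -- the header layout is an assumption for illustration, not something
# defined in this repo:
#   token = generate()
#   headers = {"authorization": "Bearer {}".format(token)}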
| [
"[email protected]"
] | |
213483598738cb2665edf987aeda25096640069a | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /ABC/ABC101-150/ABC140/A.py | ea6684c60f4ced8735ed7582a6df92a8e687b9ea | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 23 | py | print(int(input())**3)
| [
"[email protected]"
] | |
d083ca1556dd430f3d6201dd69d2bae797c40620 | ea7d2090ba1d66fc5bf91b255742ae07e1f74c3d | /2020/insomnihack_teaser/welcome/pow.py | 7fe66a371a9a4572dc10a0584c49d9cb52517718 | [] | no_license | arty-hlr/CTF-writeups | 1a3e29b9a3c3b80e33df0c9489cacd6ec09e46fe | 64bcda1d1d8893c2ece308f82348755a2c62ca9e | refs/heads/master | 2022-08-04T20:26:07.428393 | 2022-07-30T11:11:34 | 2022-07-30T11:11:34 | 167,851,059 | 4 | 3 | null | 2022-07-30T10:55:11 | 2019-01-27T19:47:03 | Python | UTF-8 | Python | false | false | 232 | py | import hashX
from pwn import *
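# Proof-of-work for the welcome banner: the server prints a 6-character target after 'with "',
# and hashX.main(h, 'md5') (local helper, not shown here) is assumed to brute-force a string
# whose MD5 digest starts with that target so it can be sent back.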
s = remote('welcome.insomnihack.ch',1337)
s.recvuntil('with "')
h = s.recv(6).decode()
found = hashX.main(h,'md5')
log.info(f"found string: {found}")
s.sendline(found)
log.info(s.recvall().decode())
| [
"[email protected]"
] | |
d584d7c8d4764221eb0ba568444c06c5b4f675d2 | 4bc048ebbf5d28b399d3ab89e717f3e7496abc38 | /periods/tests/test_email_sender.py | e6b3eb6a2683c922dfbbdc6f65b6fe9db9609a2b | [
"MIT"
] | permissive | jessamynsmith/eggtimer-server | 3feff03057148f7ab54c0df8c863f1543be886a9 | e1b1d9d848893b9e6e56e985da74d6b378c07744 | refs/heads/master | 2023-02-05T05:38:51.194914 | 2023-02-04T04:27:02 | 2023-02-04T04:27:02 | 8,124,406 | 50 | 18 | MIT | 2023-01-11T12:45:53 | 2013-02-10T15:57:26 | Python | UTF-8 | Python | false | false | 871 | py | from django.contrib.auth import get_user_model
from django.test import TestCase
from mock import patch
from periods import email_sender
class TestEmailSender(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
password='bogus', email='[email protected]', first_name=u'Jessamyn')
@patch('django.core.mail.EmailMultiAlternatives.send')
def test_send_text_only(self, mock_send):
result = email_sender.send(self.user, 'Hi!', 'good day', None)
self.assertEqual(True, result)
mock_send.assert_called_once_with()
@patch('django.core.mail.EmailMultiAlternatives.send')
def test_send_with_html(self, mock_send):
result = email_sender.send(self.user, 'Hi!', 'good day', '<p>good day</p>')
self.assertEqual(True, result)
mock_send.assert_called_once_with()
| [
"[email protected]"
] | |
ae04c679aa39b3879f61ca4cc2bb61cc2caaa05f | 636007d520745778b34ecb57ef0fce5e9ac04481 | /mantle_transition_zone_migration_obspy/section_migration_py/cross_section_Ps_Pds_bootstrap.py | 6daf4611ceea6f0a7d20aba38937ecceb30a8ff1 | [] | no_license | dIOGOLOC/codes_escritos | afc1e67686ace9162080afd6c61783508965d684 | b8b5343d1e9ff28a39fc7221f33952f0ffd3db35 | refs/heads/master | 2023-06-08T12:40:56.511078 | 2023-06-02T23:33:57 | 2023-06-02T23:33:57 | 94,356,450 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,700 | py | # coding: utf-8
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import obspy
import os
from obspy.taup import TauPyModel
from obspy.geodetics import kilometer2degrees
import copy
import matplotlib
from matplotlib.cm import get_cmap
from mpl_toolkits.mplot3d import Axes3D
import cartopy.crs as ccrs
from cartopy.io.shapereader import Reader
import cartopy.feature as cfeature
from fatiando import gridder, utils
import scipy.io
import matplotlib.cm as cm
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import json
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.patches import Circle,Rectangle
import math
from parameters_py.mgconfig import (
RF_DIR,RF_EXT,MODEL_FILE_NPZ,MIN_DEPTH,MAX_DEPTH,INTER_DEPTH,
NUMBER_PP_PER_BIN,GRID_PP_MULT,COLORMAP_STD,
LLCRNRLON_LARGE,LLCRNRLAT_LARGE,URCRNRLON_LARGE,URCRNRLAT_LARGE,LLCRNRLON_SMALL,
URCRNRLON_SMALL,LLCRNRLAT_SMALL,URCRNRLAT_SMALL,PROJECT_LAT,PROJECT_LON,
BOUNDARY_1_SHP,BOUNDARY_2_SHP,OUTPUT_DIR,
EXT_FIG,DPI_FIG,FRESNEL_ZONE_RADIUS,DIST_GRID_PP,DEPTH_RANGE,COLORMAP_VEL,
NUMBER_PP_PER_BIN,NUMBER_STA_PER_BIN,DEPTH_TARGET
)
print('Starting Cross section CODE')
print('\n')
STA_DIR = OUTPUT_DIR+'MODEL_INTER_DEPTH_'+str(INTER_DEPTH)+'_DEPTH_TARGET_'+str(DEPTH_TARGET)+'/'+'Stations'+'/'
print('Looking for receiver functions data in JSON file in '+STA_DIR)
print('\n')
filename_STA = STA_DIR+'sta_dic.json'
sta_dic = json.load(open(filename_STA))
event_depth = sta_dic['event_depth']
event_lat = sta_dic['event_lat']
event_long = sta_dic['event_long']
event_dist = sta_dic['event_dist']
event_gcarc = sta_dic['event_gcarc']
event_sta = sta_dic['event_sta']
event_ray = sta_dic['event_ray']
sta_lat = sta_dic['sta_lat']
sta_long = sta_dic['sta_long']
sta_data = sta_dic['sta_data']
sta_time = sta_dic['sta_time']
print('Importing selected binned data')
print('\n')
PP_SELEC_DIR = OUTPUT_DIR+'MODEL_INTER_DEPTH_'+str(INTER_DEPTH)+'_DEPTH_TARGET_'+str(DEPTH_TARGET)+'/'+'SELECTED_BINNED_DATA'+'/'
RESULTS_FOLDER_BINS = PP_SELEC_DIR+'/'+'RESULTS_NUMBER_PP_PER_BIN_'+str(NUMBER_PP_PER_BIN)+'_NUMBER_STA_PER_BIN_'+str(NUMBER_STA_PER_BIN)+'/'
filename = RESULTS_FOLDER_BINS+'SELECTED_BINNED.json'
SELECTED_BINNED_DATA_dic = json.load(open(filename))
lats = SELECTED_BINNED_DATA_dic['lat']
lons = SELECTED_BINNED_DATA_dic['lon']
RF_number = SELECTED_BINNED_DATA_dic['len_Pds']
RF_stacking_Pds = SELECTED_BINNED_DATA_dic['data_Pds']
RF_stacking_Ppds = SELECTED_BINNED_DATA_dic['data_Ppds']
RF_stacking_Pds_BOOTSTRAP = SELECTED_BINNED_DATA_dic['data_BOOTSTRAP_Pds']
RF_stacking_Ppds_BOOTSTRAP = SELECTED_BINNED_DATA_dic['data_BOOTSTRAP_Ppds']
RF_BOOTSTRAP_DEPTH_mean_1_Pds = SELECTED_BINNED_DATA_dic['RF_BOOTSTRAP_DEPTH_mean_1_Pds']
RF_BOOTSTRAP_DEPTH_mean_1_Ppds = SELECTED_BINNED_DATA_dic['RF_BOOTSTRAP_DEPTH_mean_1_Ppds']
RF_BOOTSTRAP_DEPTH_mean_520_Pds = SELECTED_BINNED_DATA_dic['RF_BOOTSTRAP_DEPTH_mean_520_Pds']
RF_BOOTSTRAP_DEPTH_mean_520_Ppds = SELECTED_BINNED_DATA_dic['RF_BOOTSTRAP_DEPTH_mean_520_Ppds']
RF_BOOTSTRAP_DEPTH_mean_2_Pds = SELECTED_BINNED_DATA_dic['RF_BOOTSTRAP_DEPTH_mean_2_Pds']
RF_BOOTSTRAP_DEPTH_mean_2_Ppds = SELECTED_BINNED_DATA_dic['RF_BOOTSTRAP_DEPTH_mean_2_Ppds']
RF_DEPTH_mean_1_Pds = SELECTED_BINNED_DATA_dic['mean_1_Pds']
RF_DEPTH_std_1_Pds = SELECTED_BINNED_DATA_dic['std_1_Pds']
RF_DEPTH_mean_520_Pds = SELECTED_BINNED_DATA_dic['mean_520_Pds']
RF_DEPTH_std_520_Pds = SELECTED_BINNED_DATA_dic['std_520_Pds']
RF_DEPTH_mean_2_Pds = SELECTED_BINNED_DATA_dic['mean_2_Pds']
RF_DEPTH_std_2_Pds = SELECTED_BINNED_DATA_dic['std_2_Pds']
RF_DEPTH_mean_1_Ppds = SELECTED_BINNED_DATA_dic['mean_1_Ppds']
RF_DEPTH_std_1_Ppds = SELECTED_BINNED_DATA_dic['std_1_Ppds']
RF_DEPTH_mean_520_Ppds = SELECTED_BINNED_DATA_dic['mean_520_Ppds']
RF_DEPTH_std_520_Ppds = SELECTED_BINNED_DATA_dic['std_520_Ppds']
RF_DEPTH_mean_2_Ppds = SELECTED_BINNED_DATA_dic['mean_2_Ppds']
RF_DEPTH_std_2_Ppds = SELECTED_BINNED_DATA_dic['std_2_Ppds']
RF_DEPTH_mtz_thickness_Pds = SELECTED_BINNED_DATA_dic['mtz_thickness_Pds']
RF_DEPTH_mtz_thickness_Pds_std = SELECTED_BINNED_DATA_dic['mtz_thickness_Pds_std']
RF_DEPTH_mtz_thickness_Ppds = SELECTED_BINNED_DATA_dic['mtz_thickness_Ppds']
RF_DEPTH_mtz_thickness_Ppds_std = SELECTED_BINNED_DATA_dic['mtz_thickness_Ppds_std']
RF_DEPTH_true_thickness_MTZ_Pds = SELECTED_BINNED_DATA_dic['true_thickness_MTZ_Pds']
RF_DEPTH_true_thickness_MTZ_Pds_std = SELECTED_BINNED_DATA_dic['true_thickness_MTZ_Pds_std']
RF_DEPTH_true_thickness_MTZ_Ppds = SELECTED_BINNED_DATA_dic['true_thickness_MTZ_Ppds']
RF_DEPTH_true_thickness_MTZ_Ppds_std = SELECTED_BINNED_DATA_dic['true_thickness_MTZ_Ppds_std']
RF_DEPTH_mean_1_true_Pds = SELECTED_BINNED_DATA_dic['true_mean_1_Pds']
RF_DEPTH_std_1_true_Pds = SELECTED_BINNED_DATA_dic['true_std_1_Pds']
RF_DEPTH_mean_2_true_Pds = SELECTED_BINNED_DATA_dic['true_mean_2_Pds']
RF_DEPTH_std_2_true_Pds = SELECTED_BINNED_DATA_dic['true_std_2_Pds']
RF_DEPTH_mean_1_true_Ppds = SELECTED_BINNED_DATA_dic['true_mean_1_Ppds']
RF_DEPTH_std_1_true_Ppds = SELECTED_BINNED_DATA_dic['true_std_1_Ppds']
RF_DEPTH_mean_2_true_Ppds = SELECTED_BINNED_DATA_dic['true_mean_2_Ppds']
RF_DEPTH_std_2_true_Ppds = SELECTED_BINNED_DATA_dic['true_std_2_Ppds']
RF_delta_1_Vp_mean = SELECTED_BINNED_DATA_dic['delta_1_Vp_mean']
RF_delta_1_Vp_std = SELECTED_BINNED_DATA_dic['delta_1_Vp_std']
RF_delta_1_Vs_mean = SELECTED_BINNED_DATA_dic['delta_1_Vs_mean']
RF_delta_1_Vs_std = SELECTED_BINNED_DATA_dic['delta_1_Vs_std']
RF_delta_2_Vp_mean = SELECTED_BINNED_DATA_dic['delta_2_Vp_mean']
RF_delta_2_Vp_std = SELECTED_BINNED_DATA_dic['delta_2_Vp_std']
RF_delta_2_Vs_mean = SELECTED_BINNED_DATA_dic['delta_2_Vs_mean']
RF_delta_2_Vs_std = SELECTED_BINNED_DATA_dic['delta_2_Vs_std']
print('Calculating earth model layers')
print('\n')
camadas_terra_10_km = np.arange(MIN_DEPTH,MAX_DEPTH+INTER_DEPTH,INTER_DEPTH)
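# Illustrative values only (MIN_DEPTH/MAX_DEPTH/INTER_DEPTH come from mgconfig and may differ):
# e.g. MIN_DEPTH=300, MAX_DEPTH=800, INTER_DEPTH=10 gives array([300, 310, ..., 790, 800]),
# one entry per depth sample used when plotting the stacked receiver functions below.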
print('Plotting ...')
print('\n')
#Color Maps
colormap = plt.get_cmap(COLORMAP_VEL)
fig, ax = plt.subplots(nrows=1, ncols=1, subplot_kw={'projection': ccrs.Mercator(central_longitude=PROJECT_LON, globe=None)},figsize=(10,5))
ax.set_extent([LLCRNRLON_LARGE,URCRNRLON_LARGE,LLCRNRLAT_LARGE,URCRNRLAT_LARGE])
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.set_xticks(np.arange(LLCRNRLON_LARGE,URCRNRLON_LARGE,4), crs=ccrs.PlateCarree())
ax.set_yticks(np.arange(LLCRNRLAT_LARGE,URCRNRLAT_LARGE,4), crs=ccrs.PlateCarree())
ax.tick_params(labelbottom=True,labeltop=True,labelleft=True,labelright=True)
ax.grid(True,which='major',color='gray',linewidth=1,linestyle='None')
reader_1_SHP = Reader(BOUNDARY_1_SHP)
shape_1_SHP = list(reader_1_SHP.geometries())
plot_shape_1_SHP = cfeature.ShapelyFeature(shape_1_SHP, ccrs.PlateCarree())
ax.add_feature(plot_shape_1_SHP, facecolor='none', edgecolor='k',linewidth=3)
reader_2_SHP = Reader(BOUNDARY_2_SHP)
shape_2_SHP = list(reader_2_SHP.geometries())
plot_shape_2_SHP = cfeature.ShapelyFeature(shape_2_SHP, ccrs.PlateCarree())
ax.add_feature(plot_shape_2_SHP, facecolor='none', edgecolor='k',linewidth=1)
norm_410 = mpl.colors.Normalize(vmin=200,vmax=300,clip=True)
for i,j in enumerate(lons):
if math.isnan(RF_DEPTH_true_thickness_MTZ_Pds[i]) == False:
circulo_410 = Circle(radius=DIST_GRID_PP*(1-(RF_DEPTH_true_thickness_MTZ_Pds_std[i]/50)),xy=(lons[i],lats[i]),color=colormap(norm_410(RF_DEPTH_true_thickness_MTZ_Pds[i])), ec='None',transform=ccrs.Geodetic(),zorder=3)
ax.add_patch(circulo_410)
circulo_410.pickable()
circulo_410.set_picker(True)
ax.plot(sta_long,sta_lat, '^',markersize=10,markeredgecolor='k',markerfacecolor='grey',transform=ccrs.PlateCarree())
#______________________________________________________________________
sm_410 = plt.cm.ScalarMappable(cmap=colormap,norm=norm_410)
sm_410._A = []
fig.colorbar(sm_410,ax=ax,orientation='horizontal',shrink=0.8)
plt.title('Pick four points for bootstrapping cross-section and them close the windows', y=1.08)
lon_click = []
lat_click = []
def onpick1(event):
if isinstance(event.artist,Circle):
patch = event.artist
patch_lon = float("%.4f" % round(patch.center[0],4))
patch_lat = float("%.4f" % round(patch.center[1],4))
print('Return lon/lat of the rectangle selected (left,bottom)')
print('lon='+str(patch_lon), 'lat='+str(patch_lat))
lon_click.append(patch_lon)
lat_click.append(patch_lat)
retangulo_PICK = Circle(radius=DIST_GRID_PP,xy=(patch_lon, patch_lat),color='k', ec='k',linewidth=1,transform=ccrs.Geodetic(),zorder=5)
ax.add_patch(retangulo_PICK)
plt.draw()
if len(lon_click) == 4:
fig.canvas.mpl_disconnect(cid)
return lon_click,lat_click
cid = fig.canvas.mpl_connect('pick_event', onpick1)
plt.show()
PP_FIGURE = OUTPUT_DIR+'MODEL_INTER_DEPTH_'+str(INTER_DEPTH)+'_DEPTH_TARGET_'+str(DEPTH_TARGET)+'/'+'Figures'+'/'
RESULTS_FOLDER = PP_FIGURE+'/'+'RESULTS_NUMBER_PP_PER_BIN_'+str(NUMBER_PP_PER_BIN)+'_NUMBER_STA_PER_BIN_'+str(NUMBER_STA_PER_BIN)+'/'
os.makedirs(RESULTS_FOLDER,exist_ok=True)
fig.savefig(RESULTS_FOLDER+'SELECTED_BINNED_DATA_CROSS_SECTION_Pds_Ppds_bootstrap_points.'+EXT_FIG,dpi=DPI_FIG)
print('Allocating points')
print('\n')
#Profile lat/lon
RF_lat_profile = []
RF_lon_profile = []
#Profile Data
RF_data_profile_Pds = []
RF_data_profile_Ppds = []
#P410s
RF_DEPTH_mean_1_profile_Pds = []
RF_DEPTH_std_1_profile_Pds = []
#Pp410s
RF_DEPTH_mean_1_profile_Ppds = []
RF_DEPTH_std_1_profile_Ppds = []
#P520s
RF_DEPTH_mean_520_profile_Pds = []
RF_DEPTH_std_520_profile_Pds = []
#Pp520s
RF_DEPTH_mean_520_profile_Ppds = []
RF_DEPTH_std_520_profile_Ppds = []
#P660s
RF_DEPTH_mean_2_profile_Pds = []
RF_DEPTH_std_2_profile_Pds = []
#Pp660s
RF_DEPTH_mean_2_profile_Ppds = []
RF_DEPTH_std_2_profile_Ppds = []
#MTZ Pds
RF_DEPTH_mtz_thickness_profile_Pds = []
RF_DEPTH_mtz_thickness_profile_Pds_std = []
#MTZ Ppds
RF_DEPTH_mtz_thickness_profile_Ppds = []
RF_DEPTH_mtz_thickness_profile_Ppds_std = []
# True 410 km
RF_DEPTH_mean_1_true_profile = []
RF_DEPTH_std_1_true_profile = []
# True 660 km
RF_DEPTH_mean_2_true_profile = []
RF_DEPTH_std_2_true_profile = []
# True MTZ
RF_DEPTH_true_thickness_MTZ_profile = []
RF_DEPTH_true_thickness_MTZ_profile_std = []
#Bootstrap Data Receiver Functions Pds and Ppds
RF_stacking_Pds_BOOTSTRAP_profile = []
RF_stacking_Ppds_BOOTSTRAP_profile = []
#Bootstrap Data Mean P410s
RF_BOOTSTRAP_DEPTH_mean_1_Pds_profile = []
#Bootstrap Data Mean Pp410s
RF_BOOTSTRAP_DEPTH_mean_1_Ppds_profile = []
#Bootstrap Data Mean P520s
RF_BOOTSTRAP_DEPTH_mean_520_Pds_profile = []
#Bootstrap Data Mean Pp520s
RF_BOOTSTRAP_DEPTH_mean_520_Ppds_profile = []
#Bootstrap Data Mean P660s
RF_BOOTSTRAP_DEPTH_mean_2_Pds_profile = []
#Bootstrap Data Mean P660s
RF_BOOTSTRAP_DEPTH_mean_2_Ppds_profile = []
lat_lon = [(lons[k],lats[k]) for k,l in enumerate(lats)]
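# lat_lon is a lookup list of (lon, lat) bin centres; the .index((lon_click[i], lat_click[i]))
# call below maps each picked circle back to the position of that bin in the SELECTED_BINNED arrays.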
for i,j in enumerate(lon_click):
idx = lat_lon.index((lon_click[i],lat_click[i]))
#Profile lat/lon
RF_lat_profile.append(lats[idx])
RF_lon_profile.append(lons[idx])
#Profile Data
RF_data_profile_Pds.append(RF_stacking_Pds[idx])
RF_data_profile_Ppds.append(RF_stacking_Ppds[idx])
#P410s
RF_DEPTH_mean_1_profile_Pds.append(RF_DEPTH_mean_1_Pds[idx])
RF_DEPTH_std_1_profile_Pds.append(RF_DEPTH_std_1_Pds[idx])
#Pp410s
RF_DEPTH_mean_1_profile_Ppds.append(RF_DEPTH_mean_1_Ppds[idx])
RF_DEPTH_std_1_profile_Ppds.append(RF_DEPTH_std_1_Ppds[idx])
#P520s
RF_DEPTH_mean_520_profile_Pds.append(RF_DEPTH_mean_520_Pds[idx])
RF_DEPTH_std_520_profile_Pds.append(RF_DEPTH_std_520_Pds[idx])
#Pp520s
RF_DEPTH_mean_520_profile_Ppds.append(RF_DEPTH_mean_520_Ppds[idx])
RF_DEPTH_std_520_profile_Ppds.append(RF_DEPTH_std_520_Ppds[idx])
#P660s
RF_DEPTH_mean_2_profile_Pds.append(RF_DEPTH_mean_2_Pds[idx])
RF_DEPTH_std_2_profile_Pds.append(RF_DEPTH_std_2_Pds[idx])
#Pp660s
RF_DEPTH_mean_2_profile_Ppds.append(RF_DEPTH_mean_2_Ppds[idx])
RF_DEPTH_std_2_profile_Ppds.append(RF_DEPTH_std_2_Ppds[idx])
#MTZ Pds
RF_DEPTH_mtz_thickness_profile_Pds.append(RF_DEPTH_mtz_thickness_Pds[idx])
RF_DEPTH_mtz_thickness_profile_Pds_std.append(RF_DEPTH_mtz_thickness_Pds_std[idx])
#MTZ Ppds
RF_DEPTH_mtz_thickness_profile_Ppds.append(RF_DEPTH_mtz_thickness_Ppds[idx])
RF_DEPTH_mtz_thickness_profile_Ppds_std.append(RF_DEPTH_mtz_thickness_Ppds_std[idx])
# True 410 km
RF_DEPTH_mean_1_true_profile.append(RF_DEPTH_mean_1_true_Pds[idx])
RF_DEPTH_std_1_true_profile.append(RF_DEPTH_std_1_true_Pds[idx])
# True 660 km
RF_DEPTH_mean_2_true_profile.append(RF_DEPTH_mean_2_true_Pds[idx])
RF_DEPTH_std_2_true_profile.append(RF_DEPTH_std_2_true_Pds[idx])
# True MTZ
RF_DEPTH_true_thickness_MTZ_profile.append(RF_DEPTH_true_thickness_MTZ_Pds[idx])
RF_DEPTH_true_thickness_MTZ_profile_std.append(RF_DEPTH_true_thickness_MTZ_Pds_std[idx])
#Bootstrap Data Receiver Functions Pds and Ppds
RF_stacking_Pds_BOOTSTRAP_profile.append(RF_stacking_Pds_BOOTSTRAP[idx])
RF_stacking_Ppds_BOOTSTRAP_profile.append(RF_stacking_Ppds_BOOTSTRAP[idx])
#Bootstrap Data Mean P410s
RF_BOOTSTRAP_DEPTH_mean_1_Pds_profile.append(RF_BOOTSTRAP_DEPTH_mean_1_Pds[idx])
#Bootstrap Data Mean Pp410s
RF_BOOTSTRAP_DEPTH_mean_1_Ppds_profile.append(RF_BOOTSTRAP_DEPTH_mean_1_Ppds[idx])
#Bootstrap Data Mean P520s
RF_BOOTSTRAP_DEPTH_mean_520_Pds_profile.append(RF_BOOTSTRAP_DEPTH_mean_520_Pds[idx])
#Bootstrap Data Mean Pp520s
RF_BOOTSTRAP_DEPTH_mean_520_Ppds_profile.append(RF_BOOTSTRAP_DEPTH_mean_520_Ppds[idx])
#Bootstrap Data Mean P660s
RF_BOOTSTRAP_DEPTH_mean_2_Pds_profile.append(RF_BOOTSTRAP_DEPTH_mean_2_Pds[idx])
#Bootstrap Data Mean P660s
RF_BOOTSTRAP_DEPTH_mean_2_Ppds_profile.append(RF_BOOTSTRAP_DEPTH_mean_2_Ppds[idx])
print('Plotting the Final Figure')
#Cross section figure
fig = plt.figure(figsize=(30, 10))
fig.suptitle('Pds and Ppds Bootstrapping points')
gs = gridspec.GridSpec(6, 8)
gs.update(wspace=0.5, hspace=0.75)
#Figure Pds
for _i, _j in enumerate(RF_data_profile_Pds):
pds_grid = fig.add_subplot(gs[0:3, _i*2:_i*2+1])
ppds_grid = fig.add_subplot(gs[3:6, _i*2:_i*2+1])
pds_grid_410_660 = fig.add_subplot(gs[0:3, _i*2+1])
ppds_grid_410_660 = fig.add_subplot(gs[3:6,_i*2+1])
factor_Pds = 1
majorLocatorY = MultipleLocator(50)
minorLocatorY = MultipleLocator(10)
x_data_Pds= []
for x,c in enumerate(RF_stacking_Pds_BOOTSTRAP_profile[_i]):
RF_data_factor_Pds_bootstrap = [l for k, l in enumerate(c)]
x_data_Pds.append(RF_data_factor_Pds_bootstrap)
pds_grid.plot(RF_data_factor_Pds_bootstrap,camadas_terra_10_km,'silver',linewidth=0.1, zorder=10)
min_x = [min(a) for a in zip(*x_data_Pds)]
max_x = [max(a) for a in zip(*x_data_Pds)]
pds_grid.fill_betweenx(y=camadas_terra_10_km,x1=min_x, x2=max_x, facecolor='whitesmoke',alpha=0.3, interpolate=True, zorder=5)
if math.isnan(RF_DEPTH_mean_1_profile_Pds[_i]) == False:
pds_grid.text(-0.0095,RF_DEPTH_mean_1_profile_Pds[_i],str(round(RF_DEPTH_mean_1_profile_Pds[_i]))+'±'+str(round(RF_DEPTH_std_1_profile_Pds[_i])),zorder=40,fontsize=6, fontweight='bold',ha='left',bbox={'facecolor':'white','edgecolor':'none','pad':1})
#if math.isnan(RF_DEPTH_std_520_profile_Pds[i]) == False:
# pds_grid.text(-0.0095,RF_DEPTH_mean_520_profile_Pds[_i],str(round(RF_DEPTH_mean_520_profile_Pds[_i]))+'±'+str(round(RF_DEPTH_std_520_profile_Pds[_i])),zorder=41,fontsize=6, fontweight='bold',ha='left',bbox={'facecolor':'white','edgecolor':'none','pad':1})
if math.isnan(RF_DEPTH_mean_2_profile_Pds[_i]) == False:
pds_grid.text(-0.0095,RF_DEPTH_mean_2_profile_Pds[_i],str(round(RF_DEPTH_mean_2_profile_Pds[_i]))+'±'+str(round(RF_DEPTH_std_2_profile_Pds[_i])),zorder=42,fontsize=6, fontweight='bold',ha='left',bbox={'facecolor':'white','edgecolor':'none','pad':1})
RF_data_factor_Pds = [l for k, l in enumerate(_j)]
pds_grid.plot(RF_data_factor_Pds,camadas_terra_10_km,'k',linewidth=2, zorder=30)
pds_grid.yaxis.set_ticks_position('both')
pds_grid.yaxis.set_major_locator(majorLocatorY)
pds_grid.yaxis.set_minor_locator(minorLocatorY)
pds_grid.grid(True,which='major',linestyle='None')
pds_grid.fill_betweenx(camadas_terra_10_km,RF_data_factor_Pds,0,where=np.array(RF_data_factor_Pds)>=0,alpha=0.5, facecolor='dimgrey',interpolate=True, zorder=19)
pds_grid.fill_betweenx(camadas_terra_10_km,RF_data_factor_Pds,0,where=np.array(RF_data_factor_Pds)<=0,alpha=0.5, facecolor='lightgrey', interpolate=True, zorder=20)
#pds_grid.set_xticks([])
pds_grid.set_xlim(-0.01,0.01)
pds_grid.set_title('Lat = '+str(round(RF_lat_profile[_i],1))+' - Lon = '+str(round(RF_lon_profile[_i],1)))
pds_grid.set_ylim(800,300)
if _i == 0:
pds_grid.set_ylabel('Depth (km)')
pds_grid.yaxis.set_label_position("left")
if _i != 0:
pds_grid.axes.axes.yaxis.set_ticklabels([])
#Figure Ppds
factor_Ppds = 1
x_data_Ppds = []
for x,c in enumerate(RF_stacking_Ppds_BOOTSTRAP_profile[_i]):
RF_data_factor_Ppds_bootstrap = [l for k, l in enumerate(c)]
x_data_Ppds.append(RF_data_factor_Ppds_bootstrap)
ppds_grid.plot(RF_data_factor_Ppds_bootstrap,camadas_terra_10_km,'silver',linewidth=0.1, zorder=10)
min_x = [min(a) for a in zip(*x_data_Ppds)]
max_x = [max(a) for a in zip(*x_data_Ppds)]
ppds_grid.fill_betweenx(y=camadas_terra_10_km,x1=min_x, x2=max_x, facecolor='whitesmoke',alpha=0.3, interpolate=True, zorder=5)
RF_data_factor_Ppds = [l for k, l in enumerate(RF_data_profile_Ppds[_i])]
ppds_grid.plot(RF_data_factor_Ppds,camadas_terra_10_km,'k',linewidth=2, zorder=30)
if math.isnan(RF_DEPTH_mean_1_profile_Ppds[_i]) == False:
ppds_grid.text(-0.0095,RF_DEPTH_mean_1_profile_Ppds[_i],str(round(RF_DEPTH_mean_1_profile_Ppds[_i]))+'±'+str(round(RF_DEPTH_std_1_profile_Ppds[_i])),zorder=40,fontsize=6, fontweight='bold',ha='left',bbox={'facecolor':'white','edgecolor':'none','pad':1})
#if math.isnan(RF_DEPTH_mean_520_profile_Ppds[i]) == False:
# ppds_grid.text(-0.0095,RF_DEPTH_mean_520_profile_Ppds[_i],str(round(RF_DEPTH_mean_520_profile_Ppds[_i]))+'±'+str(round(RF_DEPTH_std_520_profile_Ppds[_i])),zorder=41,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
if math.isnan(RF_DEPTH_mean_2_profile_Ppds[_i]) == False:
ppds_grid.text(-0.0095,RF_DEPTH_mean_2_profile_Ppds[_i],str(round(RF_DEPTH_mean_2_profile_Ppds[_i]))+'±'+str(round(RF_DEPTH_std_2_profile_Ppds[_i])),zorder=42,fontsize=6, fontweight='bold',ha='left',bbox={'facecolor':'white','edgecolor':'none','pad':1})
ppds_grid.fill_betweenx(camadas_terra_10_km,RF_data_factor_Ppds,0,where=np.array(RF_data_factor_Ppds)>=0,alpha=0.5, facecolor='dimgrey',interpolate=True, zorder=19)
ppds_grid.fill_betweenx(camadas_terra_10_km,RF_data_factor_Ppds,0,where=np.array(RF_data_factor_Ppds)<=0,alpha=0.5, facecolor='lightgrey',interpolate=True, zorder=20)
ppds_grid.yaxis.set_major_locator(majorLocatorY)
ppds_grid.yaxis.set_minor_locator(minorLocatorY)
ppds_grid.grid(True,which='major',linestyle='None')
ppds_grid.yaxis.set_ticks_position('both')
#ppds_grid.set_xticks([])
ppds_grid.set_xlim(-0.01,0.01)
ppds_grid.set_ylim(800,300)
if _i == 0:
ppds_grid.set_ylabel('Depth (km)')
ppds_grid.yaxis.set_label_position("left")
if _i != 0:
ppds_grid.axes.axes.yaxis.set_ticklabels([])
#### Plot Depth 410 Pds ####
pds_grid_410_660.hist(RF_BOOTSTRAP_DEPTH_mean_1_Pds_profile[_i],bins=5,orientation='horizontal',color='k')
#### Plot Depth 520 Pds ####
pds_grid_410_660.hist(RF_BOOTSTRAP_DEPTH_mean_520_Pds_profile[_i],bins=5,orientation='horizontal',color='k')
#### Plot Depth 660 Pds ####
pds_grid_410_660.hist(RF_BOOTSTRAP_DEPTH_mean_2_Pds_profile[_i],bins=5,orientation='horizontal',color='k')
pds_grid_410_660.yaxis.set_ticks_position('both')
pds_grid_410_660.yaxis.set_ticks_position('both')
pds_grid_410_660.yaxis.set_major_locator(majorLocatorY)
pds_grid_410_660.yaxis.set_minor_locator(minorLocatorY)
pds_grid_410_660.grid(True,which='major',linestyle='None')
pds_grid_410_660.set_xlim(0,100)
pds_grid_410_660.set_ylim(800,300)
pds_grid_410_660.axes.axes.xaxis.set_ticklabels([])
if _i != 3:
pds_grid_410_660.axes.axes.yaxis.set_ticklabels([])
if _i == 3:
pds_grid_410_660.set_ylabel('Depth (km)')
pds_grid_410_660.yaxis.set_label_position("right")
pds_grid_410_660.tick_params(labelright=True,labelleft=False)
pds_grid_410_660.text(5,550,' MTZ = '+str(round(RF_DEPTH_mtz_thickness_profile_Pds[_i]))+'±'+str(round(RF_DEPTH_mtz_thickness_profile_Pds_std[_i])),zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
pds_grid_410_660.text(5,580,'(MTZ = '+str(round(RF_DEPTH_true_thickness_MTZ_profile[_i]))+'±'+str(round(RF_DEPTH_true_thickness_MTZ_profile_std[_i]))+')',zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
pds_grid_410_660.text(5,RF_DEPTH_mean_1_profile_Pds[_i]-30,'('+str(round(RF_DEPTH_mean_1_true_profile[_i]))+'±'+str(round(RF_DEPTH_std_1_true_profile[_i]))+')',zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
pds_grid_410_660.text(5,RF_DEPTH_mean_2_profile_Pds[_i]+40,'('+str(round(RF_DEPTH_mean_2_true_profile[_i]))+'±'+str(round(RF_DEPTH_std_2_true_profile[_i]))+')',zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
#### Plot Depth 410 Ppds ####
ppds_grid_410_660.hist(RF_BOOTSTRAP_DEPTH_mean_1_Ppds_profile[_i],bins=5,orientation='horizontal',color='k')
#### Plot Depth 520 Ppds ####
ppds_grid_410_660.hist(RF_BOOTSTRAP_DEPTH_mean_520_Ppds_profile[_i],bins=5,orientation='horizontal',color='k')
#### Plot Depth 660 Ppds ####
ppds_grid_410_660.hist(RF_BOOTSTRAP_DEPTH_mean_2_Ppds_profile[_i],bins=5,orientation='horizontal',color='k')
ppds_grid_410_660.yaxis.set_ticks_position('both')
ppds_grid_410_660.set_xlim(0,100)
ppds_grid_410_660.yaxis.set_ticks_position('both')
ppds_grid_410_660.yaxis.set_major_locator(majorLocatorY)
ppds_grid_410_660.yaxis.set_minor_locator(minorLocatorY)
ppds_grid_410_660.grid(True,which='major',linestyle='None')
ppds_grid_410_660.set_xlabel('Population')
ppds_grid_410_660.set_ylim(800,300)
ppds_grid_410_660.text(5,550,' MTZ = '+str(round(RF_DEPTH_mtz_thickness_profile_Ppds[_i]))+'±'+str(round(RF_DEPTH_mtz_thickness_profile_Ppds_std[_i])),zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
ppds_grid_410_660.text(5,580,'(MTZ = '+str(round(RF_DEPTH_true_thickness_MTZ_profile[_i]))+'±'+str(round(RF_DEPTH_true_thickness_MTZ_profile_std[_i]))+')',zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
ppds_grid_410_660.text(5,RF_DEPTH_mean_1_profile_Ppds[_i]-30,'('+str(round(RF_DEPTH_mean_1_true_profile[_i]))+'±'+str(round(RF_DEPTH_std_1_true_profile[_i]))+')',zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
ppds_grid_410_660.text(5,RF_DEPTH_mean_2_profile_Ppds[_i]+40,'('+str(round(RF_DEPTH_mean_2_true_profile[_i]))+'±'+str(round(RF_DEPTH_std_2_true_profile[_i]))+')',zorder=40,fontsize=6, fontweight='bold',bbox={'facecolor':'white','edgecolor':'none','pad':1})
if _i != 3:
ppds_grid_410_660.axes.axes.yaxis.set_ticklabels([])
if _i == 3:
ppds_grid_410_660.set_ylabel('Depth (km)')
ppds_grid_410_660.yaxis.set_label_position("right")
ppds_grid_410_660.tick_params(labelright=True,labelleft=False)
plt.show()
fig.savefig(RESULTS_FOLDER+'SELECTED_BINNED_DATA_CROSS_SECTION_Pds_Ppds_bootstrap.'+EXT_FIG,dpi=DPI_FIG)
print('Ending the Cross section CODE')
| [
"[email protected]"
] | |
8958e6f53eb6f329cbf8d3590c910185087cc17e | 9ae08906602af5eacec43d60e5e428269bf24eb1 | /find_light_mask.py | 1a766fd56ac704dffe8c178d500376243cf549ba | [] | no_license | yangzhaonan18/TSDcv2 | 9f73278979542d1a40ced5aa152bbc7fa363398c | e9cb0fefc7177db93510b7bc5ca1bb86e32571c6 | refs/heads/master | 2020-04-04T19:39:08.138349 | 2019-03-18T04:54:36 | 2019-03-18T04:54:36 | 156,214,910 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | # -*- coding:utf-8 -*-
import cv2
import numpy as np
| [
"[email protected]"
] | |
42189efd80b229fc640392070b1616e693ff7ad2 | 6817457f2f7cb635e84d5ac23c76873628fb04cf | /src/dama/models.py | 5bf0b6d11db89966aa4c8e2ce8e34ed97f31c38b | [
"Apache-2.0"
] | permissive | elaeon/dama_ml | 5d9a63e0daabe332a08b13813de57d9ed2608015 | 8b56c62a28c69987fc5dbd8a47406a3a22214371 | refs/heads/master | 2021-10-26T05:24:10.166028 | 2019-04-11T00:55:44 | 2019-04-11T00:55:44 | 58,218,206 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,906 | py | import os
import numpy as np
from abc import ABC, abstractmethod
from dama.data.ds import Data
from dama.data.it import Iterator, BatchIterator
from dama.utils.files import check_or_create_path_dir
from dama.measures import ListMeasure
from dama.utils.logger import log_config
from dama.utils.config import get_settings
from dama.abc.conn import AbsConn
from dama.drivers.sqlite import Sqlite
from dama.utils.core import Login, Metadata
from dama.utils.files import rm
import json
settings = get_settings("paths")
settings.update(get_settings("vars"))
log = log_config(__name__)
class MLModel:
def __init__(self, fit_fn=None, predictors=None, load_fn=None, save_fn=None,
input_transform=None, model=None, to_json_fn=None):
self.fit_fn = fit_fn
self.predictors = predictors
self.load_fn = load_fn
self.save_fn = save_fn
self.to_json_fn = to_json_fn
if input_transform is None:
self.input_transform = lambda x: x
else:
self.input_transform = input_transform
self.model = model
def fit(self, *args, **kwargs):
return self.fit_fn(*args, **kwargs)
def predict(self, data: AbsConn, output_format_fn=None, output=None, batch_size: int = 258):
def _it(data):
data = self.input_transform(data)
if batch_size > 0:
data = Iterator(data).batchs(chunks=(batch_size, ))
for slice_obj in data:
batch = slice_obj.batch.to_ndarray()
predict = self.predictors(batch)
yield output_format_fn(predict)
else:
for row in data:
batch = row.to_ndarray().reshape(1, -1)
predict = self.predictors(batch)
yield output_format_fn(predict, output=output)[0]
if batch_size > 0:
return BatchIterator.from_batchs(_it(data), dtypes=data.dtypes, length=data.size,
from_batch_size=batch_size, to_slice=True)
else:
return Iterator(_it(data), length=data.size)
def load(self, path):
return self.load_fn(path)
def save(self, path):
return self.save_fn(path)
def to_json(self) -> dict:
if self.to_json_fn is not None:
return self.to_json_fn()
return {}
class MetadataX(object):
metaext = "json"
@staticmethod
def save_json(file_path, data):
with open(file_path, "w") as f:
json.dump(data, f)
@staticmethod
def load_json(path):
try:
with open(path, 'r') as f:
data = json.load(f)
return data
except IOError as e:
log.error(e)
return {}
except Exception as e:
log.error("{} {}".format(e, path))
@staticmethod
def get_metadata(path_metadata_version: str = None):
metadata = {}
if path_metadata_version is not None:
metadata["train"] = MetadataX.load_json(path_metadata_version)
else:
metadata["train"] = {}
return metadata
@staticmethod
def make_model_version_file(name, path, classname, ext, model_version):
model_name_v = "version.{}".format(model_version)
check_point = check_or_create_path_dir(path, classname)
destination = check_or_create_path_dir(check_point, name)
model = check_or_create_path_dir(destination, model_name_v)
filename = os.path.join(model, "meta")
return "{}.{}".format(filename, ext)
class BaseModel(MetadataX, ABC):
def __init__(self, metrics=None, metadata_path=None):
self.model_name = None
self.group_name = None
self.model_version = None
self.base_path = None
self.path_model_version = None
self.path_metadata_version = None
self.model = None
self.ext = "ckpt.pkl"
self.metrics = metrics
self.model_params = None
self.num_steps = None
self.model_version = None
self.ds = None
self.data_groups = None
self.model_params = None
self.num_steps = None
self.batch_size = None
if metadata_path is None:
self.metadata_path = settings["metadata_path"]
else:
self.metadata_path = metadata_path
super(BaseModel, self).__init__()
@abstractmethod
def scores(self, measures=None, batch_size=2000):
return NotImplemented
@abstractmethod
def output_format(self, prediction, output=None):
return NotImplemented
@abstractmethod
def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None) -> MLModel:
return NotImplemented
@abstractmethod
def load_fn(self, path):
return NotImplemented
def predict(self, data, output=None, batch_size: int = 258):
return self.model.predict(data, output_format_fn=self.output_format, output=output, batch_size=batch_size)
def metadata_model(self):
return {
"group_name": self.group_name,
"model_module": self.module_cls_name(),
"name": self.model_name,
"base_path": self.base_path,
"hash": self.ds.hash,
"from_ds": self.ds.from_ds_hash
}
def metadata_train(self):
return {
"model_version": self.model_version,
"hyperparams": self.model_params,
"num_steps": self.num_steps,
"score": self.scores(measures=self.metrics).measures_to_dict(),
"batch_size": self.batch_size,
"model_json": self.model.to_json(),
"data_groups": self.data_groups,
}
def __enter__(self):
self.ds = self.get_dataset()
self.ds.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.ds.close()
def get_dataset(self) -> Data:
log.debug("LOADING DS FOR MODEL: {} {} {} {}".format(self.cls_name(), self.model_name,
self.model_version, self.base_path))
group_name = "s/n" if self.group_name is None else self.group_name
with Metadata(Sqlite(login=Login(table=settings["model_tag"]), path=self.metadata_path)) as metadata:
query = "SELECT hash FROM {} WHERE name=? AND version=? AND model_module=? AND group_name=? AND base_path=?".format(
settings["model_tag"])
data_hash = metadata.query(query,
(self.model_name, self.model_version, self.module_cls_name(),
group_name, self.base_path))
if len(data_hash) > 0:
driver = Sqlite(login=Login(table=settings["data_tag"]), path=self.metadata_path)
with Data.load(data_hash[0][0], metadata_driver=driver) as dataset:
pass
return dataset
def preload_model(self):
self.model = MLModel(fit_fn=None, predictors=None, load_fn=self.load_fn, save_fn=None)
def write_metadata(self):
metadata_driver = Sqlite(login=Login(table=settings["model_tag"]), path=self.metadata_path)
metadata = self.metadata_model()
metadata_train = self.metadata_train()
metadata["version"] = self.model_version
metadata["model_path"] = self.path_model_version
metadata["metadata_path_train"] = self.path_metadata_version
with Metadata(metadata_driver, metadata) as metadata:
dtypes = np.dtype([("hash", object), ("name", object), ("model_path", object), ("group_name", object),
("is_valid", bool), ("version", int), ("model_module", object), ("score_name", object),
("score", float), ("metadata_path_train", object), ("base_path", object),
("from_ds", object)])
metadata["is_valid"] = True
metadata["group_name"] = "s/n" if self.group_name is None else self.group_name
keys = ["base_path", "name", "group_name", "version", "model_module", "score_name"]
metadata.set_schema(dtypes, unique_key=[keys])
if len(metadata_train["score"]) == 0:
metadata["score_name"] = "s/n"
metadata["score"] = 0
metadata.insert_update_data(keys=[keys])
else:
for score_name in metadata_train["score"].keys():
if score_name != "":
metadata["score_name"] = score_name
metadata["score"] = metadata_train["score"][score_name]["values"][0]
metadata.insert_update_data(keys=[keys])
def save(self, name, path: str = None, model_version="1"):
self.model_version = model_version
self.model_name = name
if path is None:
self.base_path = settings["models_path"]
else:
self.base_path = path
self.path_metadata_version = MetadataX.make_model_version_file(name, self.base_path, self.cls_name(),
self.metaext, self.model_version)
self.path_model_version = MetadataX.make_model_version_file(name, self.base_path, self.cls_name(), self.ext,
model_version=model_version)
log.debug("SAVING model's data")
self.model.save(self.path_model_version)
log.debug("SAVING json metadata train info")
metadata_train = self.metadata_train()
MetadataX.save_json(self.path_metadata_version, metadata_train)
self.write_metadata()
def load_model(self):
self.preload_model()
if self.path_model_version is not None:
self.model.load(self.path_model_version)
def load_metadata(self, path_metadata_version):
metadata = MetadataX.get_metadata(path_metadata_version)
self.model_version = metadata["train"]["model_version"]
self.model_params = metadata["train"]["hyperparams"]
self.num_steps = metadata["train"]["num_steps"]
self.batch_size = metadata["train"]["batch_size"]
self.data_groups = metadata["train"]["data_groups"]
@abstractmethod
def train(self, ds: Data, batch_size: int = 0, num_steps: int = 0, n_splits=None, obj_fn=None,
model_params: dict = None, data_train_group="train_x", target_train_group='train_y',
data_test_group="test_x", target_test_group='test_y', data_validation_group="validation_x",
target_validation_group="validation_y"):
return NotImplemented
def scores2table(self):
meta = MetadataX.get_metadata(self.path_metadata_version)
try:
scores = meta["train"]["score"]
except KeyError:
return
else:
return ListMeasure.dict_to_measures(scores)
@classmethod
def load(cls, model_name: str, model_version: str, group_name: str = None, path: str = None,
metadata_path: str = None):
model = cls(metadata_path=metadata_path)
model.model_name = model_name
model.model_version = model_version
if group_name is None:
group_name = "s/n"
model.group_name = group_name
model.base_path = path
path_metadata_version = MetadataX.make_model_version_file(model_name, path, model.cls_name(),
model.metaext, model_version=model_version)
model.path_metadata_version = path_metadata_version
model.path_model_version = MetadataX.make_model_version_file(model_name, path, model.cls_name(),
model.ext, model_version=model_version)
model.load_metadata(path_metadata_version)
model.load_model()
return model
def destroy(self):
if self.path_metadata_version is not None:
rm(self.path_model_version)
rm(self.path_metadata_version)
if hasattr(self, 'ds'):
self.ds.destroy()
@classmethod
def cls_name(cls):
return cls.__name__
@classmethod
def module_cls_name(cls):
return "{}.{}".format(cls.__module__, cls.__name__)
class SupervicedModel(BaseModel):
def __init__(self, metrics=None, metadata_path=None):
super(SupervicedModel, self).__init__(metrics=metrics, metadata_path=metadata_path)
def train(self, ds: Data, batch_size: int = 0, num_steps: int = 0, n_splits=None, obj_fn=None,
model_params: dict = None, data_train_group="train_x", target_train_group='train_y',
data_test_group="test_x", target_test_group='test_y', data_validation_group="validation_x",
target_validation_group="validation_y"):
self.ds = ds
log.info("Training")
self.model_params = model_params
self.num_steps = num_steps
self.batch_size = batch_size
self.data_groups = {
"data_train_group": data_train_group, "target_train_group": target_train_group,
"data_test_group": data_test_group, "target_test_group": target_test_group,
"data_validation_group": data_validation_group, "target_validation_group": target_validation_group
}
self.model = self.prepare_model(obj_fn=obj_fn, num_steps=num_steps, model_params=model_params,
batch_size=batch_size)
class UnsupervisedModel(BaseModel):
def train(self, ds: Data, batch_size: int = 0, num_steps: int = 0, n_splits=None, obj_fn=None,
model_params: dict = None, data_train_group="train_x", target_train_group='train_y',
data_test_group="test_x", target_test_group='test_y', data_validation_group="validation_x",
target_validation_group="validation_y"):
self.ds = ds
log.info("Training")
self.model_params = model_params
self.num_steps = num_steps
self.batch_size = batch_size
self.data_groups = {
"data_train_group": data_train_group, "target_train_group": target_train_group,
"data_test_group": data_test_group, "target_test_group": target_test_group,
"data_validation_group": data_validation_group, "target_validation_group": target_validation_group
}
self.model = self.prepare_model(obj_fn=obj_fn, num_steps=num_steps, model_params=model_params,
batch_size=batch_size)
def scores(self, measures=None, batch_size: int = 258) -> ListMeasure:
return ListMeasure()
def output_format(self, prediction, output=None):
return prediction
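# --- Editor's illustrative sketch (not part of the original library) ----------
# The classes above are meant to be subclassed: a concrete model implements the
# abstract hooks and wires its callables into an MLModel. The toy subclass
# below exists only to show that wiring; its behaviour (a do-nothing loader and
# an empty score list) is an assumption made purely for illustration.
class _ExampleModel(SupervicedModel):
    """Minimal, hypothetical subclass demonstrating the abstract interface."""
    def prepare_model(self, obj_fn=None, num_steps=None, model_params=None, batch_size: int = None) -> MLModel:
        # MLModel's keyword signature is taken from BaseModel.preload_model() above.
        return MLModel(fit_fn=None, predictors=None, load_fn=self.load_fn, save_fn=None)
    def load_fn(self, path):
        # A real implementation would deserialize the fitted estimator found at `path`.
        log.debug("Example loader called with {}".format(path))
    def output_format(self, prediction, output=None):
        return prediction
    def scores(self, measures=None, batch_size=2000):
        # An empty ListMeasure keeps metadata_train()/save() working.
        return ListMeasure()
# ------------------------------------------------------------------------------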
| [
"[email protected]"
] | |
d865d3d1332562451b41a4baa2d70e83acabd738 | ee0b9cd2424e634a212a6d9734af1eaedd40dfe0 | /jhub38_dram_mcxtrace/sasview-5.0.4/src/sas/qtgui/MainWindow/UnitTesting/DataExplorerTest.py | 03240317cca800fabec43b8d52eb7f4de6dbf3b0 | [
"BSD-3-Clause"
] | permissive | moving-northwards/Docker | 775755b4618c1a7946f540505b0178e119d294d1 | 8ef18fd8c6abb0608ce9b53187e53d00d3e4e9ae | refs/heads/master | 2023-05-26T08:42:58.634525 | 2021-06-15T08:41:08 | 2021-06-15T08:41:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,243 | py | import sys
import time
import unittest
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtTest import QTest
from PyQt5.QtCore import *
from unittest.mock import MagicMock
from unittest.mock import patch
from mpl_toolkits.mplot3d import Axes3D
# set up import paths
import path_prepare
# Local
from sas.qtgui.Plotting.PlotterData import Data1D, Data2D
from sas.sascalc.dataloader.loader import Loader
from sas.qtgui.MainWindow.DataManager import DataManager
from sas.qtgui.MainWindow.DataExplorer import DataExplorerWindow
from sas.qtgui.MainWindow.GuiManager import GuiManager
from sas.qtgui.Utilities.GuiUtils import *
from sas.qtgui.UnitTesting.TestUtils import QtSignalSpy
from sas.qtgui.Plotting.Plotter import Plotter
from sas.qtgui.Plotting.Plotter2D import Plotter2D
import sas.qtgui.Plotting.PlotHelper as PlotHelper
from sas.sasview import __version__ as SASVIEW_VERSION
if not QApplication.instance():
app = QApplication(sys.argv)
class MyPerspective(object):
def __init__(self):
self.name = "Dummy Perspective"
def communicator(self):
return Communicate()
def allowBatch(self):
return True
def allowSwap(self):
return True
def setData(self, data_item=None, is_batch=False):
return None
def swapData(self, data_item=None, is_batch=False):
return None
def title(self):
return self.name
class dummy_manager(object):
def __init__(self):
self._perspective = MyPerspective()
def communicator(self):
return Communicate()
def perspective(self):
return self._perspective
def workspace(self):
return None
class _parent(object):
screen_width = 1024
screen_height = 768
class DataExplorerTest(unittest.TestCase):
'''Test the Data Explorer GUI'''
def setUp(self):
'''Create the GUI'''
self.form = DataExplorerWindow(None, dummy_manager())
def tearDown(self):
'''Destroy the GUI'''
self.form.close()
self.form = None
def testDefaults(self):
'''Test the GUI in its default state'''
# Tab widget
self.assertIsInstance(self.form, QTabWidget)
self.assertEqual(self.form.count(), 2)
# Buttons - data tab
self.assertEqual(self.form.cmdLoad.text(), "Load data")
self.assertEqual(self.form.cmdDeleteData.text(), "Delete Data")
self.assertEqual(self.form.cmdDeleteTheory.text(), "Delete")
self.assertEqual(self.form.cmdFreeze.text(), "Freeze Theory")
self.assertEqual(self.form.cmdSendTo.text(), "Send data to")
self.assertEqual(self.form.cmdSendTo.iconSize(), QSize(32, 32))
self.assertIsInstance(self.form.cmdSendTo.icon(), QIcon)
self.assertEqual(self.form.chkBatch.text(), "Batch mode")
self.assertFalse(self.form.chkBatch.isChecked())
self.assertEqual(self.form.chkSwap.text(), "Swap data")
self.assertFalse(self.form.chkSwap.isChecked())
# Buttons - theory tab
# Combo boxes
self.assertEqual(self.form.cbSelect.count(), 6)
self.assertEqual(self.form.cbSelect.currentIndex(), 0)
# Models - data
self.assertIsInstance(self.form.model, QStandardItemModel)
self.assertEqual(self.form.treeView.model().rowCount(), 0)
self.assertEqual(self.form.treeView.model().columnCount(), 0)
self.assertEqual(self.form.model.rowCount(), 0)
self.assertEqual(self.form.model.columnCount(), 0)
self.assertIsInstance(self.form.data_proxy, QSortFilterProxyModel)
self.assertEqual(self.form.data_proxy.sourceModel(), self.form.model)
self.assertEqual("[^()]", str(self.form.data_proxy.filterRegExp().pattern()))
self.assertIsInstance(self.form.treeView, QTreeView)
# Models - theory
self.assertIsInstance(self.form.theory_model, QStandardItemModel)
self.assertEqual(self.form.freezeView.model().rowCount(), 0)
self.assertEqual(self.form.freezeView.model().columnCount(), 0)
self.assertEqual(self.form.theory_model.rowCount(), 0)
self.assertEqual(self.form.theory_model.columnCount(), 0)
self.assertIsInstance(self.form.theory_proxy, QSortFilterProxyModel)
self.assertEqual(self.form.theory_proxy.sourceModel(), self.form.theory_model)
self.assertEqual("[^()]", str(self.form.theory_proxy.filterRegExp().pattern()))
self.assertIsInstance(self.form.freezeView, QTreeView)
def testWidgets(self):
"""
Test if all required widgets got added
"""
def testLoadButton(self):
loadButton = self.form.cmdLoad
filename = "cyl_400_20.txt"
# Initialize signal spy instances
spy_file_read = QtSignalSpy(self.form, self.form.communicator.fileReadSignal)
# Return no files.
QFileDialog.getOpenFileNames = MagicMock(return_value=('',''))
# Click on the Load button
QTest.mouseClick(loadButton, Qt.LeftButton)
# Test the getOpenFileName() dialog called once
self.assertTrue(QFileDialog.getOpenFileNames.called)
QFileDialog.getOpenFileNames.assert_called_once()
# Make sure the signal has not been emitted
self.assertEqual(spy_file_read.count(), 0)
# Now, return a single file
QFileDialog.getOpenFileNames = MagicMock(return_value=(filename,''))
# Click on the Load button
QTest.mouseClick(loadButton, Qt.LeftButton)
qApp.processEvents()
# Test the getOpenFileName() dialog called once
self.assertTrue(QFileDialog.getOpenFileNames.called)
QFileDialog.getOpenFileNames.assert_called_once()
# Expected one spy instance
#self.assertEqual(spy_file_read.count(), 1)
#self.assertIn(filename, str(spy_file_read.called()[0]['args'][0]))
def testLoadFiles(self):
"""
Test progress bar update while loading of multiple files
"""
# Set up the spy on progress bar update signal
spy_progress_bar_update = QtSignalSpy(self.form,
self.form.communicator.progressBarUpdateSignal)
# Populate the model
filename = ["cyl_400_20.txt", "P123_D2O_10_percent.dat", "cyl_400_20.txt"]
self.form.readData(filename)
# 0, 0, 33, 66, -1 -> 5 signals reaching progressBar
self.assertEqual(spy_progress_bar_update.count(), 5)
expected_list = [0, 0, 33, 66, -1]
spied_list = [spy_progress_bar_update.called()[i]['args'][0] for i in range(5)]
self.assertEqual(expected_list, spied_list)
def testDeleteButton(self):
"""
Functionality of the delete button
"""
deleteButton = self.form.cmdDeleteData
# Mock the confirmation dialog with return=No
QMessageBox.question = MagicMock(return_value=QMessageBox.No)
# Populate the model
filename = ["cyl_400_20.txt", "cyl_400_20.txt", "cyl_400_20.txt"]
self.form.readData(filename)
# Assure the model contains three items
self.assertEqual(self.form.model.rowCount(), 3)
# Assure the checkboxes are on
item1 = self.form.model.item(0)
item2 = self.form.model.item(1)
item3 = self.form.model.item(2)
self.assertTrue(item1.checkState() == Qt.Checked)
self.assertTrue(item2.checkState() == Qt.Checked)
self.assertTrue(item3.checkState() == Qt.Checked)
# Click on the delete button
QTest.mouseClick(deleteButton, Qt.LeftButton)
# Test the warning dialog called once
self.assertTrue(QMessageBox.question.called)
# Assure the model still contains the items
self.assertEqual(self.form.model.rowCount(), 3)
# Now, mock the confirmation dialog with return=Yes
QMessageBox.question = MagicMock(return_value=QMessageBox.Yes)
# Click on the delete button
QTest.mouseClick(deleteButton, Qt.LeftButton)
# Test the warning dialog called once
self.assertTrue(QMessageBox.question.called)
# Assure the model contains no items
self.assertEqual(self.form.model.rowCount(), 0)
# Click delete once again to assure no nasty behaviour on empty model
QTest.mouseClick(deleteButton, Qt.LeftButton)
def testDeleteTheory(self):
"""
Test that clicking "Delete" in theories tab removes selected indices
"""
deleteButton = self.form.cmdDeleteTheory
# Mock the confirmation dialog with return=No
QMessageBox.question = MagicMock(return_value=QMessageBox.No)
# Populate the model
item1 = HashableStandardItem(True)
item1.setCheckable(True)
item1.setCheckState(Qt.Checked)
item1.setText("item 1")
self.form.theory_model.appendRow(item1)
item2 = HashableStandardItem(True)
item2.setCheckable(True)
item2.setCheckState(Qt.Unchecked)
item2.setText("item 2")
self.form.theory_model.appendRow(item2)
# Assure the model contains two items
self.assertEqual(self.form.theory_model.rowCount(), 2)
# Assure the checkboxes are on
self.assertTrue(item1.checkState() == Qt.Checked)
self.assertTrue(item2.checkState() == Qt.Unchecked)
# Click on the delete button
QTest.mouseClick(deleteButton, Qt.LeftButton)
# Test the warning dialog called once
self.assertTrue(QMessageBox.question.called)
# Assure the model still contains the items
self.assertEqual(self.form.theory_model.rowCount(), 2)
# Now, mock the confirmation dialog with return=Yes
QMessageBox.question = MagicMock(return_value=QMessageBox.Yes)
# Click on the delete button
QTest.mouseClick(deleteButton, Qt.LeftButton)
# Test the warning dialog called once
self.assertTrue(QMessageBox.question.called)
# Assure the model contains 1 item
self.assertEqual(self.form.theory_model.rowCount(), 1)
# Set the remaining item to checked
self.form.theory_model.item(0).setCheckState(Qt.Checked)
# Click on the delete button again
QTest.mouseClick(deleteButton, Qt.LeftButton)
# Assure the model contains no items
self.assertEqual(self.form.theory_model.rowCount(), 0)
# Click delete once again to assure no nasty behaviour on empty model
QTest.mouseClick(deleteButton, Qt.LeftButton)
def testSendToButton(self):
"""
Test that clicking the Send To button sends checked data to a perspective
"""
# Send empty data
mocked_perspective = self.form._perspective()
mocked_perspective.setData = MagicMock()
# Click on the Send To button
QTest.mouseClick(self.form.cmdSendTo, Qt.LeftButton)
# The set_data method not called
self.assertFalse(mocked_perspective.setData.called)
# Populate the model
filename = ["cyl_400_20.txt"]
self.form.readData(filename)
QApplication.processEvents()
        # swapData is mocked as well, but only setData should be called here
mocked_perspective.swapData = MagicMock()
# Assure the checkbox is on
self.form.cbSelect.setCurrentIndex(0)
# Click on the Send To button
QTest.mouseClick(self.form.cmdSendTo, Qt.LeftButton)
QApplication.processEvents()
# Test the set_data method called
self.assertTrue(mocked_perspective.setData.called)
self.assertFalse(mocked_perspective.swapData.called)
# Now select the swap data checkbox
self.form.chkSwap.setChecked(True)
# Click on the Send To button
QTest.mouseClick(self.form.cmdSendTo, Qt.LeftButton)
QApplication.processEvents()
# Now the swap data method should be called
        # (Mock's auto-created `called_once` attribute is always truthy, so assert explicitly)
        mocked_perspective.setData.assert_called_once()
self.assertTrue(mocked_perspective.swapData.called)
# Test the exception block
QMessageBox.exec_ = MagicMock()
QMessageBox.setText = MagicMock()
mocked_perspective.swapData = MagicMock(side_effect = Exception("foo"))
# Click on the button to so the mocked swapData method raises an exception
QTest.mouseClick(self.form.cmdSendTo, Qt.LeftButton)
# Assure the message box popped up
QMessageBox.exec_.assert_called_once()
# With the right message
QMessageBox.setText.assert_called_with("foo")
# open another file
filename = ["cyl_400_20.txt"]
self.form.readData(filename)
# Mock the warning message and the swapData method
QMessageBox.exec_ = MagicMock()
QMessageBox.setText = MagicMock()
mocked_perspective.swapData = MagicMock()
# Click on the button to swap both datasets to the perspective
QTest.mouseClick(self.form.cmdSendTo, Qt.LeftButton)
# Assure the message box popped up
QMessageBox.exec_.assert_called_once()
# With the right message
QMessageBox.setText.assert_called_with(
"Dummy Perspective does not allow replacing multiple data.")
def testDataSelection(self):
"""
Tests the functionality of the Selection Option combobox
"""
# Populate the model with 1d and 2d data
filename = ["cyl_400_20.txt", "P123_D2O_10_percent.dat"]
self.form.readData(filename)
# Wait a moment for data to load
time.sleep(1)
# Unselect all data
self.form.cbSelect.activated.emit(1)
# Test the current selection
item1D = self.form.model.item(0)
item2D = self.form.model.item(1)
self.assertTrue(item1D.checkState() == Qt.Unchecked)
self.assertTrue(item2D.checkState() == Qt.Unchecked)
# Select all data
self.form.cbSelect.activated.emit(0)
# Test the current selection
self.assertTrue(item1D.checkState() == Qt.Checked)
self.assertTrue(item2D.checkState() == Qt.Checked)
# select 1d data
self.form.cbSelect.activated.emit(2)
# Test the current selection
self.assertTrue(item1D.checkState() == Qt.Checked)
self.assertTrue(item2D.checkState() == Qt.Checked)
# unselect 1d data
self.form.cbSelect.activated.emit(3)
# Test the current selection
self.assertTrue(item1D.checkState() == Qt.Unchecked)
self.assertTrue(item2D.checkState() == Qt.Checked)
# select 2d data
self.form.cbSelect.activated.emit(4)
# Test the current selection
self.assertTrue(item1D.checkState() == Qt.Unchecked)
self.assertTrue(item2D.checkState() == Qt.Checked)
# unselect 2d data
self.form.cbSelect.activated.emit(5)
# Test the current selection
self.assertTrue(item1D.checkState() == Qt.Unchecked)
self.assertTrue(item2D.checkState() == Qt.Unchecked)
def testFreezeTheory(self):
"""
Assure theory freeze functionality works
"""
# Not yet tested - agree on design first.
pass
def testRecursivelyCloneItem(self):
"""
        Test the recursive QAbstractItem/QStandardItem clone
"""
# Create an item with several branches
item1 = QStandardItem()
item2 = QStandardItem()
item3 = QStandardItem()
item4 = QStandardItem()
item5 = QStandardItem()
item6 = QStandardItem()
item4.appendRow(item5)
item2.appendRow(item4)
item2.appendRow(item6)
item1.appendRow(item2)
item1.appendRow(item3)
# Clone
new_item = self.form.recursivelyCloneItem(item1)
# assure the trees look identical
self.assertEqual(item1.rowCount(), new_item.rowCount())
self.assertEqual(item1.child(0).rowCount(), new_item.child(0).rowCount())
self.assertEqual(item1.child(1).rowCount(), new_item.child(1).rowCount())
self.assertEqual(item1.child(0).child(0).rowCount(), new_item.child(0).child(0).rowCount())
def testReadData(self):
"""
Test the low level readData() method
"""
filename = ["cyl_400_20.txt"]
self.form.manager.add_data = MagicMock()
# Initialize signal spy instances
spy_status_update = QtSignalSpy(self.form, self.form.communicator.statusBarUpdateSignal)
spy_data_received = QtSignalSpy(self.form, self.form.communicator.fileDataReceivedSignal)
# Read in the file
self.form.readData(filename)
# Expected two status bar updates
self.assertEqual(spy_status_update.count(), 2)
self.assertIn(filename[0], str(spy_status_update.called()[0]['args'][0]))
# Check that the model contains the item
self.assertEqual(self.form.model.rowCount(), 1)
self.assertEqual(self.form.model.columnCount(), 1)
# The 0th item header should be the name of the file
model_item = self.form.model.index(0,0)
model_name = self.form.model.data(model_item)
self.assertEqual(model_name, filename[0])
def skip_testDisplayHelp(self): # Skip due to help path change
"""
Test that the Help window gets shown correctly
"""
partial_url = "qtgui/MainWindow/data_explorer_help.html"
button1 = self.form.cmdHelp
button2 = self.form.cmdHelp_2
# Click on the Help button
QTest.mouseClick(button1, Qt.LeftButton)
qApp.processEvents()
# Check the browser
self.assertIn(partial_url, str(self.form._helpView.url()))
# Close the browser
self.form._helpView.close()
# Click on the Help_2 button
QTest.mouseClick(button2, Qt.LeftButton)
qApp.processEvents()
# Check the browser
self.assertIn(partial_url, str(self.form._helpView.url()))
def testLoadFile(self):
"""
Test the threaded call to readData()
"""
#self.form.loadFile()
pass
def testGetWList(self):
"""
Test the list of known extensions
"""
w_list = self.form.getWlist()
defaults = 'All (*.*);;canSAS files (*.xml);;SESANS files' +\
' (*.ses);;ASCII files (*.txt);;' +\
'IGOR/DAT 2D Q_map files (*.dat);;IGOR 1D files (*.abs);;'+\
'DANSE files (*.sans)'
default_list = defaults.split(';;')
for def_format in default_list:
self.assertIn(def_format, w_list)
def testLoadComplete(self):
"""
Test the callback method updating the data object
"""
message="Loading Data Complete"
data_dict = {"a1":Data1D()}
output_data = (data_dict, message)
self.form.manager.add_data = MagicMock()
# Initialize signal spy instances
spy_status_update = QtSignalSpy(self.form, self.form.communicator.statusBarUpdateSignal)
spy_data_received = QtSignalSpy(self.form, self.form.communicator.fileDataReceivedSignal)
# Read in the file
self.form.loadComplete(output_data)
# "Loading data complete" no longer sent in LoadFile but in callback
self.assertIn("Loading Data Complete", str(spy_status_update.called()[0]['args'][0]))
# Expect one Data Received signal
self.assertEqual(spy_data_received.count(), 1)
# Assure returned dictionary has correct data
# We don't know the data ID, so need to iterate over dict
data_dict = spy_data_received.called()[0]['args'][0]
for data_key, data_value in data_dict.items():
self.assertIsInstance(data_value, Data1D)
# Assure add_data on data_manager was called (last call)
self.assertTrue(self.form.manager.add_data.called)
@patch('sas.qtgui.Utilities.GuiUtils.plotsFromCheckedItems')
def testNewPlot1D(self, test_patch):
"""
Creating new plots from Data1D/2D
"""
loader = Loader()
manager = DataManager()
PlotHelper.clear()
self.form.enableGraphCombo(None)
# Make sure the controls are disabled
self.assertFalse(self.form.cbgraph.isEnabled())
self.assertFalse(self.form.cmdAppend.isEnabled())
# get Data1D
p_file="cyl_400_20.txt"
output_object = loader.load(p_file)
new_data = [(None, manager.create_gui_data(output_object[0], p_file))]
_, test_data = new_data[0]
self.assertTrue(f'Data file generated by SasView v{SASVIEW_VERSION}' in
test_data.notes)
# Mask retrieval of the data
test_patch.return_value = new_data
# Mask plotting
self.form.parent.workspace = MagicMock()
# Call the plotting method
self.form.newPlot()
time.sleep(1)
QApplication.processEvents()
# The plot was registered
self.assertEqual(len(PlotHelper.currentPlots()), 1)
self.assertTrue(self.form.cbgraph.isEnabled())
self.assertTrue(self.form.cmdAppend.isEnabled())
@patch('sas.qtgui.Utilities.GuiUtils.plotsFromCheckedItems')
def testNewPlot2D(self, test_patch):
"""
Creating new plots from Data1D/2D
"""
loader = Loader()
manager = DataManager()
PlotHelper.clear()
self.form.enableGraphCombo(None)
# Make sure the controls are disabled
self.assertFalse(self.form.cbgraph.isEnabled())
self.assertFalse(self.form.cmdAppend.isEnabled())
# get Data2D
p_file="P123_D2O_10_percent.dat"
output_object = loader.load(p_file)
new_data = [(None, manager.create_gui_data(output_object[0], p_file))]
# Mask retrieval of the data
test_patch.return_value = new_data
# Mask plotting
self.form.parent.workspace = MagicMock()
# Call the plotting method
#self.form.newPlot()
#QApplication.processEvents()
# The plot was registered
#self.assertEqual(len(PlotHelper.currentPlots()), 1)
#self.assertTrue(self.form.cbgraph.isEnabled())
#self.assertTrue(self.form.cmdAppend.isEnabled())
@patch('sas.qtgui.Utilities.GuiUtils.plotsFromCheckedItems')
def testAppendPlot(self, test_patch):
"""
Creating new plots from Data1D/2D
"""
loader = Loader()
manager = DataManager()
PlotHelper.clear()
self.form.enableGraphCombo(None)
# Make sure the controls are disabled
self.assertFalse(self.form.cbgraph.isEnabled())
self.assertFalse(self.form.cmdAppend.isEnabled())
# get Data1D
p_file="cyl_400_20.txt"
output_object = loader.load(p_file)
output_item = QStandardItem()
new_data = [(output_item, manager.create_gui_data(output_object[0], p_file))]
# Mask plotting
self.form.parent.workspace = MagicMock()
# Mask the plot show call
Plotter.show = MagicMock()
# Mask retrieval of the data
test_patch.return_value = new_data
# Call the plotting method
self.form.newPlot()
# Call the plotting method again, so we have 2 graphs
self.form.newPlot()
QApplication.processEvents()
# See that we have two plots
self.assertEqual(len(PlotHelper.currentPlots()), 2)
# Add data to plot #1
self.form.cbgraph.setCurrentIndex(1)
self.form.appendPlot()
# See that we still have two plots
self.assertEqual(len(PlotHelper.currentPlots()), 2)
def testUpdateGraphCombo(self):
"""
Test the combo box update
"""
PlotHelper.clear()
graph_list=["1","2","3"]
self.form.updateGraphCombo(graph_list)
self.assertEqual(self.form.cbgraph.count(), 3)
self.assertEqual(self.form.cbgraph.currentText(), '1')
graph_list=[]
self.form.updateGraphCombo(graph_list)
self.assertEqual(self.form.cbgraph.count(), 0)
def testUpdateModelFromPerspective(self):
"""
Assure the model update is correct
"""
good_item = QStandardItem()
bad_item = "I'm so bad"
self.form.model.reset = MagicMock()
self.form.updateModelFromPerspective(good_item)
# See that the model got reset
# self.form.model.reset.assert_called_once()
# See that the bad item causes raise
with self.assertRaises(Exception):
self.form.updateModelFromPerspective(bad_item)
def testContextMenu(self):
"""
See if the context menu is present
"""
# get Data1D
p_file=["cyl_400_20.txt"]
# Read in the file
output, message = self.form.readData(p_file)
self.form.loadComplete((output, message))
# Pick up the treeview index corresponding to that file
index = self.form.treeView.indexAt(QPoint(5,5))
self.form.show()
        # Find out the center point of the treeView row
rect = self.form.treeView.visualRect(index).center()
self.form.context_menu.exec_ = MagicMock()
# Move the mouse pointer to the first row
QTest.mouseMove(self.form.treeView.viewport(), pos=rect)
# This doesn't invoke the action/signal. Investigate why?
# QTest.mouseClick(self.form.treeView.viewport(), Qt.RightButton, pos=rect)
# Instead, send the signal directly
self.form.treeView.customContextMenuRequested.emit(rect)
# See that the menu has been shown
self.form.context_menu.exec_.assert_called_once()
def baseNameStateCheck(self):
"""
Helper method for the Name Change Tests Below - Check the base state of the window
"""
self.assertTrue(hasattr(self.form, "nameChangeBox"))
self.assertTrue(self.form.nameChangeBox.isModal())
self.assertEqual(self.form.nameChangeBox.windowTitle(), "Display Name Change")
self.assertFalse(self.form.nameChangeBox.isVisible())
self.assertIsNone(self.form.nameChangeBox.data)
self.assertIsNone(self.form.nameChangeBox.model_item)
self.assertFalse(self.form.nameChangeBox.txtCurrentName.isEnabled())
self.assertFalse(self.form.nameChangeBox.txtDataName.isEnabled())
self.assertFalse(self.form.nameChangeBox.txtFileName.isEnabled())
self.assertFalse(self.form.nameChangeBox.txtNewCategory.isEnabled())
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), "")
self.assertEqual(self.form.nameChangeBox.txtDataName.text(), "")
self.assertEqual(self.form.nameChangeBox.txtFileName.text(), "")
self.assertEqual(self.form.nameChangeBox.txtNewCategory.text(), "")
def testNameChange(self):
"""
Test the display name change routines
"""
# Define Constants
FILE_NAME = "cyl_400_20.txt"
FILE_NAME_APPENDED = FILE_NAME + " [1]"
TEST_STRING_1 = "test value change"
TEST_STRING_2 = "TEST VALUE CHANGE"
# Test base state of the name change window
self.baseNameStateCheck()
# Get Data1D
p_file=[FILE_NAME]
# Read in the file
output, message = self.form.readData(p_file)
key = list(output.keys())
output[key[0]].title = TEST_STRING_1
self.form.loadComplete((output, message))
# select the data and run name change routine
self.form.treeView.selectAll()
self.form.changeName()
# Test window state after adding data
self.assertTrue(self.form.nameChangeBox.isVisible())
self.assertIsNotNone(self.form.nameChangeBox.data)
self.assertIsNotNone(self.form.nameChangeBox.model_item)
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), FILE_NAME)
self.assertEqual(self.form.nameChangeBox.txtDataName.text(), TEST_STRING_1)
self.assertEqual(self.form.nameChangeBox.txtFileName.text(), FILE_NAME)
self.assertTrue(self.form.nameChangeBox.rbExisting.isChecked())
self.assertFalse(self.form.nameChangeBox.rbDataName.isChecked())
self.assertFalse(self.form.nameChangeBox.rbFileName.isChecked())
self.assertFalse(self.form.nameChangeBox.rbNew.isChecked())
# Take the existing name
self.form.nameChangeBox.cmdOK.click()
self.form.changeName()
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), FILE_NAME)
# Take the title
self.form.nameChangeBox.rbDataName.setChecked(True)
self.assertFalse(self.form.nameChangeBox.rbExisting.isChecked())
self.form.nameChangeBox.cmdOK.click()
self.form.changeName()
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), TEST_STRING_1)
# Take the file name again
self.form.nameChangeBox.rbFileName.setChecked(True)
self.assertFalse(self.form.nameChangeBox.rbExisting.isChecked())
self.form.nameChangeBox.cmdOK.click()
self.form.changeName()
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), FILE_NAME_APPENDED)
# Take the user-defined name, which is empty - should retain existing value
self.form.nameChangeBox.rbNew.setChecked(True)
self.assertFalse(self.form.nameChangeBox.rbExisting.isChecked())
self.form.nameChangeBox.cmdOK.click()
self.form.changeName()
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), FILE_NAME_APPENDED)
# Take a different user-defined name
self.form.nameChangeBox.rbNew.setChecked(True)
self.form.nameChangeBox.txtNewCategory.setText(TEST_STRING_2)
self.assertFalse(self.form.nameChangeBox.rbExisting.isChecked())
self.form.nameChangeBox.cmdOK.click()
self.form.changeName()
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), TEST_STRING_2)
# Test cancel button
self.form.nameChangeBox.rbNew.setChecked(True)
self.form.nameChangeBox.txtNewCategory.setText(TEST_STRING_1)
self.form.nameChangeBox.cmdCancel.click()
self.form.changeName()
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), TEST_STRING_2)
self.form.nameChangeBox.cmdOK.click()
# Test delete data
self.form.nameChangeBox.removeData(None) # Nothing should happen
self.assertEqual(self.form.nameChangeBox.txtCurrentName.text(), TEST_STRING_2)
self.form.nameChangeBox.removeData([self.form.nameChangeBox.model_item]) # Should return to base state
self.baseNameStateCheck()
def testShowDataInfo(self):
"""
Test of the showDataInfo method
"""
# get Data1D
p_file=["cyl_400_20.txt"]
# Read in the file
output, message = self.form.readData(p_file)
self.form.loadComplete((output, message))
# select the data
self.form.treeView.selectAll()
# Call the tested method
self.form.showDataInfo()
# Test the properties
self.assertTrue(self.form.txt_widget.isReadOnly())
self.assertEqual(self.form.txt_widget.windowTitle(), "Data Info: cyl_400_20.txt")
self.assertIn("Waveln_max", self.form.txt_widget.toPlainText())
# Slider moved all the way up
self.assertEqual(self.form.txt_widget.verticalScrollBar().sliderPosition(), 0)
def testSaveDataAs(self):
"""
Test the Save As context menu action
"""
# get Data1D
p_file=["cyl_400_20.txt"]
# Read in the file
output, message = self.form.readData(p_file)
self.form.loadComplete((output, message))
# select the data
self.form.treeView.selectAll()
QFileDialog.getSaveFileName = MagicMock(return_value=("cyl_400_20_out", "(*.txt)"))
# Call the tested method
self.form.saveDataAs()
QFileDialog.getSaveFileName.assert_called_with(
caption="Save As",
filter='Text files (*.txt);;CanSAS 1D files(*.xml);;NXcanSAS files (*.h5)',
options=16,
parent=None)
QFileDialog.getSaveFileName.assert_called_once()
# get Data2D
p_file=["P123_D2O_10_percent.dat"]
# Read in the file
output, message = self.form.readData(p_file)
self.form.loadComplete((output, message))
# select the data
index = self.form.model.index(1, 0)
selmodel = self.form.treeView.selectionModel()
selmodel.setCurrentIndex(index, QItemSelectionModel.NoUpdate)
selmodel.select(index, QItemSelectionModel.Select|QItemSelectionModel.Rows)
QFileDialog.getSaveFileName = MagicMock(return_value="test.xyz")
# Call the tested method
self.form.saveDataAs()
QFileDialog.getSaveFileName.assert_called_with(
caption="Save As",
filter='IGOR/DAT 2D file in Q_map (*.dat);;NXcanSAS files (*.h5)',
options=16,
parent=None)
QFileDialog.getSaveFileName.assert_called_once()
def testQuickDataPlot(self):
"""
Quick data plot generation.
"""
# get Data1D
p_file=["cyl_400_20.txt"]
# Read in the file
output, message = self.form.readData(p_file)
self.form.loadComplete((output, message))
# select the data
self.form.treeView.selectAll()
Plotter.show = MagicMock() # for masking the display
self.form.quickDataPlot()
self.assertTrue(Plotter.show.called)
def notestQuickData3DPlot(self):
"""
Slow(er) 3D data plot generation.
"""
# get Data1D
p_file=["P123_D2O_10_percent.dat"]
# Read in the file
output, message = self.form.readData(p_file)
self.form.loadComplete((output, message))
# select the data
self.form.treeView.selectAll()
Plotter2D.show = MagicMock() # for masking the display
self.form.quickData3DPlot()
self.assertTrue(Plotter2D.show.called)
def testShowEditMask(self):
"""
Edit mask on a 2D plot.
TODO: add content once plotting finalized
"""
pass
def testDeleteItem(self):
"""
Delete selected item from data explorer
"""
# Mock the confirmation dialog with return=No
QMessageBox.question = MagicMock(return_value=QMessageBox.No)
# Populate the model
filename = ["cyl_400_20.txt", "cyl_400_20.txt", "cyl_400_20.txt"]
self.form.readData(filename)
# Assure the model contains three items
self.assertEqual(self.form.model.rowCount(), 3)
# Add an item to first file item
item1 = QtGui.QStandardItem("test")
item1.setCheckable(True)
self.form.model.item(0).appendRow(item1)
# Check the new item is in
self.assertTrue(self.form.model.item(0).hasChildren())
#select_item = self.form.model.item(0).child(3)
select_item = self.form.model.item(0)
select_index = self.form.model.indexFromItem(select_item)
# Open up items
self.form.current_view.expandAll()
# Select the newly created item
self.form.current_view.selectionModel().select(select_index, QtCore.QItemSelectionModel.Rows)
# Attempt at deleting
self.form.deleteSelectedItem()
# Test the warning dialog called once
self.assertTrue(QMessageBox.question.called)
# Assure the model still contains the items
self.assertEqual(self.form.model.rowCount(), 3)
# Now, mock the confirmation dialog with return=Yes
QMessageBox.question = MagicMock(return_value=QMessageBox.Yes)
# Select the newly created item
self.form.current_view.selectionModel().select(select_index, QtCore.QItemSelectionModel.Rows)
# delete it. now for good
self.form.deleteSelectedItem()
# Test the warning dialog called once
self.assertTrue(QMessageBox.question.called)
        # The top-level items remain, so the model still contains all three
self.assertEqual(self.form.model.rowCount(), 3)
def testClosePlotsForItem(self):
"""
Delete selected item from data explorer should also delete corresponding plots
"""
# Mock the confirmation dialog with return=No
QMessageBox.question = MagicMock(return_value=QMessageBox.No)
loader = Loader()
manager = DataManager()
PlotHelper.clear()
self.form.enableGraphCombo(None)
# Make sure the controls are disabled
self.assertFalse(self.form.cbgraph.isEnabled())
self.assertFalse(self.form.cmdAppend.isEnabled())
# Populate the model
filename = ["cyl_400_20.txt"]
self.form.readData(filename)
# Mask plotting
self.form.parent.workspace = MagicMock()
# Call the plotting method
self.form.newPlot()
time.sleep(1)
QApplication.processEvents()
# The plot was registered
self.assertEqual(len(PlotHelper.currentPlots()), 1)
self.assertEqual(len(self.form.plot_widgets), 1)
# could have leftovers from previous tests
#self.assertEqual(list(self.form.plot_widgets.keys()), ['Graph3'])
self.assertEqual(len(self.form.plot_widgets.keys()), 1)
# data index
model_item = self.form.model.item(0,0)
# Call the method
self.form.closePlotsForItem(model_item)
# See that no plot remained
self.assertEqual(len(PlotHelper.currentPlots()), 0)
self.assertEqual(len(self.form.plot_widgets), 0)
def testPlotsFromMultipleData1D(self):
"""
Tests interplay between plotting 1D datasets and plotting
a single 1D dataset from two separate fit tabs
GH issue 1546
"""
# prepare active_plots
plot1 = Plotter(parent=self.form)
data1 = Data1D()
data1.name = 'p1'
data1.plot_role = Data1D.ROLE_DATA
plot1.data = data1
plot2 = Plotter(parent=self.form)
data2 = Data1D()
data2.name = 'M2 [p1]'
data2.plot_role = Data1D.ROLE_DEFAULT
plot2.data = data2
plot3 = Plotter(parent=self.form)
data3 = Data1D()
data3.name = 'Residuals for M2[p1]'
data3.plot_role = Data1D.ROLE_RESIDUAL
plot3.data = data3
# pretend we're displaying three plots
self.form.active_plots['p1'] = plot1
self.form.active_plots['M2 [p1]'] = plot2
self.form.active_plots['Residuals for M2[p1]'] = plot3
# redoing plots from the same tab
# data -> must be shown
self.assertFalse(self.form.isPlotShown(data1))
# model and residuals are already shown
self.assertTrue(self.form.isPlotShown(data2))
self.assertTrue(self.form.isPlotShown(data3))
# Try from different fit page
plot4 = Plotter(parent=self.form)
data4 = Data1D()
data4.name = 'M1 [p1]'
data4.plot_role = Data1D.ROLE_DEFAULT
plot4.data = data1
# same data but must show, since different model
self.assertFalse(self.form.isPlotShown(data4))
def testPlotsFromMultipleData2D(self):
"""
Tests interplay between plotting 2D datasets and plotting
a single 2D dataset from two separate fit tabs
GH issue 1546
"""
# prepare active_plots
plot1 = Plotter(parent=self.form)
data1 = Data2D()
data1.name = 'p1'
data1.plot_role = Data1D.ROLE_DATA
plot1.data = data1
plot2 = Plotter(parent=self.form)
data2 = Data2D()
data2.name = 'M2 [p1]'
data2.plot_role = Data1D.ROLE_DEFAULT
plot2.data = data2
plot3 = Plotter(parent=self.form)
data3 = Data2D()
data3.name = 'Residuals for M2[p1]'
data3.plot_role = Data1D.ROLE_RESIDUAL
plot3.data = data3
# pretend we're displaying three plots
self.form.active_plots['p1'] = plot1
self.form.active_plots['M2 [p1]'] = plot2
self.form.active_plots['Residuals for M2[p1]'] = plot3
# redoing plots from the same tab
# data -> Already there, don't show
self.assertTrue(self.form.isPlotShown(data1))
# model and residuals are already shown
self.assertTrue(self.form.isPlotShown(data2))
self.assertTrue(self.form.isPlotShown(data3))
# Try from different fit page
plot4 = Plotter(parent=self.form)
data4 = Data2D()
data4.name = 'M1 [p1]'
plot4.data = data1
# same data but must show, since different model
self.assertFalse(self.form.isPlotShown(data4))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
d4fcfeef7b63063c5f5d8f152d236ed85efe6a52 | 41f4415409901876ac153459b4f6fe28a5a934a7 | /src/lambda_reduce2.py | 8775db4ef45cb5721860c3a3f4f8ed9297b3fde1 | [] | no_license | prasertcbs/python_tutorial | 4062a413df6192a71eb56f211501d710ddc26b90 | 2302ea030f6984e6ac3f77a366369e9c47502f5a | refs/heads/master | 2023-03-18T16:47:27.564924 | 2023-03-09T14:33:59 | 2023-03-09T14:33:59 | 320,748,324 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | from functools import reduce
def demo_reduce_logic():
age = 15
gender = 'F'
height = 165
weight = 48
crit = [age > 18, gender == 'F', height > 160, weight > 45]
ok = reduce(lambda cv, v: cv and v, crit)
# ok = crit[0] and crit[1] and crit[2] and crit[3]
print(ok)
def demo_join_list():
sep='|'
names=['Peter', 'Jenny', 'Linda', 'Bruce', 'Ann']
print(sep.join(names))
x = reduce(lambda cv, v: f'{cv}{sep}{v}', names)
print(x)
def demo_if_reduce():
numbers = [3, 6, 4, 1, 7, 8]
# sum even numbers
x = reduce(lambda cv, v: cv + (v if v % 2 == 0 else 0), numbers, 0)
# x = reduce(lambda cv, v: cv + (v if v % 2 == 0 else 0), numbers)
print(x)
def demo_list_dict():
names = [
{'name': 'Peter', 'score': 5},
{'name': 'Ann', 'score': 8},
{'name': 'Jenny', 'score': 7},
{'name': 'Bruce', 'score': 10}
]
x = reduce(lambda cv, v: cv + v['score'], names, 0)
# x = reduce(lambda cv, v: cv + v['score'], names)
print(x)
print(sum([v['score'] for v in names]))
if __name__ == "__main__":
# demo_reduce_logic()
# demo_join_list()
# demo_if_reduce()
demo_list_dict() | [
"[email protected]"
] | |
b7b671cd460bf84fd46bd1bcb8cbca90d74ec439 | 344e2956b4e2a30a8ef7532d951f96d995d1dd1e | /16_mmdet/lib/mmdet/models/dense_heads/rpn_test_mixin.py | a59422fd69ed7e61f628b90f4528d0cec11c2370 | [
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | karndeepsingh/Monk_Object_Detection | e64199705326e4cd65e4b29946cae210a4ef9649 | 425fa50a3236cb9097389646275da06bf9185f6b | refs/heads/master | 2022-12-22T18:26:53.933397 | 2020-09-28T12:49:50 | 2020-09-28T12:49:50 | 299,307,843 | 1 | 1 | Apache-2.0 | 2020-09-28T12:52:18 | 2020-09-28T12:52:17 | null | UTF-8 | Python | false | false | 2,151 | py | import sys
from mmdet.core import merge_aug_proposals
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import completed
class RPNTestMixin(object):
"""Test methods of RPN."""
if sys.version_info >= (3, 7):
async def async_simple_test_rpn(self, x, img_metas):
sleep_interval = self.rpn_head.test_cfg.pop(
'async_sleep_interval', 0.025)
async with completed(
__name__, 'rpn_head_forward',
sleep_interval=sleep_interval):
rpn_outs = self(x)
proposal_list = self.get_bboxes(*rpn_outs, img_metas)
return proposal_list
def simple_test_rpn(self, x, img_metas):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Proposals of each image.
"""
rpn_outs = self(x)
proposal_list = self.get_bboxes(*rpn_outs, img_metas)
return proposal_list
def aug_test_rpn(self, feats, img_metas):
samples_per_gpu = len(img_metas[0])
aug_proposals = [[] for _ in range(samples_per_gpu)]
for x, img_meta in zip(feats, img_metas):
proposal_list = self.simple_test_rpn(x, img_meta)
for i, proposals in enumerate(proposal_list):
aug_proposals[i].append(proposals)
# reorganize the order of 'img_metas' to match the dimensions
# of 'aug_proposals'
aug_img_metas = []
for i in range(samples_per_gpu):
aug_img_meta = []
for j in range(len(img_metas)):
aug_img_meta.append(img_metas[j][i])
aug_img_metas.append(aug_img_meta)
# after merging, proposals will be rescaled to the original image size
merged_proposals = [
merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
]
return merged_proposals
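# --- Editor's note (illustrative, not part of mmdet) --------------------------
# The nested loop in aug_test_rpn() above transposes `img_metas` from
# per-augmentation lists of per-sample dicts into per-sample lists of
# per-augmentation dicts, so each entry lines up with `aug_proposals`.
# A self-contained sketch of that reshaping with plain lists:
def _example_transpose_img_metas(img_metas):
    samples_per_gpu = len(img_metas[0])
    return [[img_metas[j][i] for j in range(len(img_metas))]
            for i in range(samples_per_gpu)]
# _example_transpose_img_metas([['a0', 'b0'], ['a1', 'b1']])
# -> [['a0', 'a1'], ['b0', 'b1']]  (two samples, two augmentations each)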
| [
"[email protected]"
] | |
ba069b17cac491de9884a946e82b583bcaab68e4 | 2713827b2f68aae65d4be516f024d51a7e762a97 | /doc/user/SConscript | 9e039dac7b1b08fbd7c55e9e8439769e0b70f962 | [
"MIT"
] | permissive | ptomulik/scons-tool-gnuplot | bd83fd6c4d85fb4391288d90d7e26f67ec4f1f29 | 2b92500feed48267d1bfdcaaae542a65dcc42b60 | refs/heads/master | 2021-05-21T12:03:14.776722 | 2020-04-16T22:36:33 | 2020-04-16T22:36:33 | 9,398,514 | 1 | 0 | null | 2020-04-16T22:36:35 | 2013-04-12T16:21:39 | Python | UTF-8 | Python | false | false | 1,493 | #
# Copyright (c) 2013-2020 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
Import(['env'])
if 'user-doc' in COMMAND_LINE_TARGETS:
env.Tool('docbook')
pdf = env.DocbookPdf('manual')
html = env.DocbookHtml('manual')
env.Ignore('.', pdf + html)
env.Alias( 'user-doc', pdf + html )
env.AlwaysBuild( 'user-doc' )
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=scons expandtab tabstop=4 shiftwidth=4:
| [
"[email protected]"
] | ||
6a3716a612b8ebb22cb755b1aaab49a349259463 | fc38005e1474ce803a272387d401da6cd0a8c0ef | /lter_pasta/src/pasta_gmn_adapter/app/restrict_to_verb.py | b5ab1b462e5d53d91264fe160c26b0abe8ce0482 | [
"Apache-2.0"
] | permissive | DataONEorg/SlenderNodes | 4a3876e12d46c031b99821717533e2f4f39a57c8 | 34dd4ed9d581d259a70d7c9a884f520226dd2691 | refs/heads/master | 2023-02-18T08:39:24.072662 | 2022-01-07T13:12:18 | 2022-01-07T13:12:18 | 53,552,615 | 1 | 3 | Apache-2.0 | 2023-02-08T02:36:49 | 2016-03-10T03:38:03 | HTML | UTF-8 | Python | false | false | 1,243 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`restrict_to_verb`
==========================
:Synopsis: Limit views to be called only with specific verb.
:Author: Roger Dahl
"""
import django.http
def _allow_only_verbs(f, verbs):
def wrap(request, *args, **kwargs):
if request.method not in verbs:
return django.http.HttpResponseNotAllowed(verbs)
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
def get(f):
return _allow_only_verbs(f, ['GET'])
def put(f):
return _allow_only_verbs(f, ['PUT'])
def post(f):
return _allow_only_verbs(f, ['POST'])
def delete(f):
return _allow_only_verbs(f, ['DELETE'])
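# --- Editor's illustrative usage sketch (not part of the original module) -----
# How the decorators above are applied to a Django view; `example_view` and its
# response text are hypothetical and exist only for illustration.
@get
def example_view(request):
    return django.http.HttpResponse('GET only')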
| [
"[email protected]"
] | |
b968ff45bd84273d0e43339d72915b1cd40cf9af | 0bff4e342d15d90dde7ed0b8a8a479b2c82d17d7 | /home/check_images.py | 7a787f6253d0666f6a04e6ff8730a63e9d128ce2 | [] | no_license | AyodejiOkusanya/Dog_classifier_project | e272070d21646d11c5724724c0abdc691c4b8226 | 13a8f29d9506332bd0bc23415501918565147624 | refs/heads/master | 2020-07-26T23:41:14.535123 | 2019-09-16T13:31:38 | 2019-09-16T13:31:38 | 208,800,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/check_images.py
#
# TODO 0: Add your information below for Programmer & Date Created.
# PROGRAMMER: Ayodeji Okusanya
# DATE CREATED: 11th September 2019
# REVISED DATE:
# PURPOSE: Classifies pet images using a pretrained CNN model, compares these
# classifications to the true identity of the pets in the images, and
# summarizes how well the CNN performed on the image classification task.
# Note that the true identity of the pet (or object) in the image is
# indicated by the filename of the image. Therefore, your program must
# first extract the pet image label from the filename before
# classifying the images using the pretrained CNN model. With this
# program we will be comparing the performance of 3 different CNN model
# architectures to determine which provides the 'best' classification.
#
# Use argparse Expected Call with <> indicating expected user input:
# python check_images.py --dir <directory with images> --arch <model>
# --dogfile <file that contains dognames>
# Example call:
# python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##
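# --- Editor's illustrative sketch (not part of this project) ------------------
# The command-line interface described above is implemented in get_input_args.py
# (imported below). A minimal argparse-based version could look like this; the
# argument names follow the expected call, while the defaults are taken from the
# example call and are otherwise assumptions.
def _example_get_input_args():
    import argparse
    parser = argparse.ArgumentParser(description="Classify pet images with a pretrained CNN")
    parser.add_argument('--dir', type=str, default='pet_images/',
                        help='path to the folder of pet images')
    parser.add_argument('--arch', type=str, default='vgg',
                        help='CNN model architecture to use')
    parser.add_argument('--dogfile', type=str, default='dognames.txt',
                        help='text file that contains all dog names')
    return parser.parse_args()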
# Imports python modules
from time import time, sleep
# Imports print functions that check the lab
from print_functions_for_lab_checks import *
# Imports functions created for this program
from get_input_args import get_input_args
from get_pet_labels import get_pet_labels
from classify_images import classify_images
from adjust_results4_isadog import adjust_results4_isadog
from calculates_results_stats import calculates_results_stats
from print_results import print_results
# Main program function defined below
def main():
# TODO 0: Measures total program runtime by collecting start time
start_time = time()
# TODO 1: Define get_input_args function within the file get_input_args.py
# This function retrieves 3 Command Line Arugments from user as input from
# the user running the program from a terminal window. This function returns
# the collection of these command line arguments from the function call as
# the variable in_arg
in_arg = get_input_args()
# Function that checks command line arguments using in_arg
check_command_line_arguments(in_arg)
# TODO 2: Define get_pet_labels function within the file get_pet_labels.py
# Once the get_pet_labels function has been defined replace 'None'
# in the function call with in_arg.dir Once you have done the replacements
# your function call should look like this:
# get_pet_labels(in_arg.dir)
# This function creates the results dictionary that contains the results,
# this dictionary is returned from the function call as the variable results
results = get_pet_labels(in_arg.dir)
# Function that checks Pet Images in the results Dictionary using results
check_creating_pet_image_labels(results)
# TODO 3: Define classify_images function within the file classiy_images.py
# Once the classify_images function has been defined replace first 'None'
# in the function call with in_arg.dir and replace the last 'None' in the
# function call with in_arg.arch Once you have done the replacements your
# function call should look like this:
# classify_images(in_arg.dir, results, in_arg.arch)
# Creates Classifier Labels with classifier function, Compares Labels,
# and adds these results to the results dictionary - results
classify_images(in_arg.dir, results, in_arg.arch)
# Function that checks Results Dictionary using results
check_classifying_images(results)
# TODO 4: Define adjust_results4_isadog function within the file adjust_results4_isadog.py
# Once the adjust_results4_isadog function has been defined replace 'None'
# in the function call with in_arg.dogfile Once you have done the
# replacements your function call should look like this:
# adjust_results4_isadog(results, in_arg.dogfile)
# Adjusts the results dictionary to determine if classifier correctly
# classified images as 'a dog' or 'not a dog'. This demonstrates if
# model can correctly classify dog images as dogs (regardless of breed)
adjust_results4_isadog(results, in_arg.dogfile)
# Function that checks Results Dictionary for is-a-dog adjustment using results
check_classifying_labels_as_dogs(results)
# TODO 5: Define calculates_results_stats function within the file calculates_results_stats.py
# This function creates the results statistics dictionary that contains a
# summary of the results statistics (this includes counts & percentages). This
# dictionary is returned from the function call as the variable results_stats
# Calculates results of run and puts statistics in the Results Statistics
# Dictionary - called results_stats
results_stats = calculates_results_stats(results)
# Function that checks Results Statistics Dictionary using results_stats
check_calculating_results(results, results_stats)
# TODO 6: Define print_results function within the file print_results.py
# Once the print_results function has been defined replace 'None'
# in the function call with in_arg.arch Once you have done the
# replacements your function call should look like this:
# print_results(results, results_stats, in_arg.arch, True, True)
# Prints summary results, incorrect classifications of dogs (if requested)
# and incorrectly classified breeds (if requested)
print_results(results, results_stats, in_arg.arch, True, True)
# TODO 0: Measure total program runtime by collecting end time
end_time = time()
# TODO 0: Computes overall runtime in seconds & prints it in hh:mm:ss format
tot_time = end_time - start_time #calculate difference between end time and start time
print("\n** Total Elapsed Runtime:",
str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
+str(int((tot_time%3600)%60)) )
# Call to main function to run the program
if __name__ == "__main__":
main()
| [
"github email address"
] | github email address |
18a586efdc6122d4271bb81d3d0b85f45f3fc386 | 000a4b227d970cdc6c8db192f4437698cb782721 | /python/helpers/typeshed/stubs/redis/redis/commands/json/path.pyi | d33df3045beadd6a79613752d247d376006a94bb | [
"Apache-2.0",
"MIT"
] | permissive | trinhanhngoc/intellij-community | 2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d | 1d4a962cfda308a73e0a7ef75186aaa4b15d1e17 | refs/heads/master | 2022-11-03T21:50:47.859675 | 2022-10-19T16:39:57 | 2022-10-19T23:25:35 | 205,765,945 | 1 | 0 | Apache-2.0 | 2019-09-02T02:55:15 | 2019-09-02T02:55:15 | null | UTF-8 | Python | false | false | 125 | pyi | class Path:
strPath: str
@staticmethod
def rootPath() -> str: ...
def __init__(self, path: str) -> None: ...
| [
"[email protected]"
] | |
c3c2c3822b57e06c8b713582230fd8a9950bcfcf | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/inventory/InventoryUIGoldItem.py | 1a3afd314852cc484b7e5797f1275e913e7e2160 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui import GuiPanel, PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from otp.otpbase import OTPLocalizer
from pirates.inventory.InventoryUIGlobals import *
from pirates.inventory.InventoryGlobals import *
from pirates.inventory import InventoryUIStackItem
class InventoryUIGoldItem(InventoryUIStackItem.InventoryUIStackItem):
def __init__(self, manager, itemTuple, imageScaleFactor = 1.0, update = False):
InventoryUIStackItem.InventoryUIStackItem.__init__(self, manager, itemTuple, imageScaleFactor = imageScaleFactor, showMax = 0, update = False)
self.initialiseoptions(InventoryUIGoldItem)
gui = loader.loadModel('models/gui/toplevel_gui')
self['image'] = gui.find('**/treasure_w_coin*')
self['image_scale'] = 0.1 * imageScaleFactor
self.imageScale = 3.0
self.textScale = 1.1
if update:
self.accept(getCategoryChangeMsg(localAvatar.getInventoryId(), InventoryType.ItemTypeMoney), self.updateAmount)
def destroy(self):
self.ignoreAll()
InventoryUIStackItem.InventoryUIStackItem.destroy(self)
def getName(self):
return PLocalizer.GoldName
def updateAmount(self, caller = None):
inventory = localAvatar.getInventory()
if inventory:
amount = inventory.getGoldInPocket()
self.amount = amount
self.updateAmountText()
| [
"[email protected]"
] | |
05464dd5b1fdbd853afe7496ba59a07cb777199b | ce26ae9315d7814f6dbfa1918c7f5c5a6293e49b | /Lammps/Pore/qsub/Launcher.py.bak | 145233501f45b556ca2fb8eec925aebc7b2a894c | [] | no_license | sramirezh/Utilities | 25982a28cc40c6bea47c8ccbd95870addd2e826d | a86e72787059e511983cd047f3027aa10eba7090 | refs/heads/master | 2023-02-09T10:16:28.571756 | 2023-01-31T00:14:28 | 2023-01-31T00:14:28 | 89,708,819 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | bak | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 28 09:24:03 2019
"Creates replicas of simulations starting from configurations during the equilibration"
@author: sr802
"""
import glob
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../')) #This falls into Utilities path
import Lammps.core_functions as cf
import shutil
from simulation_utilities import simulation
cwd = os.getcwd() #current working directory
dir_path = os.path.dirname(os.path.realpath(__file__))#Path of this python script
#Main
#Getting the path to all the restart files
files=glob.glob('./Try/particle/*')
template=cwd+'/Template'
home=cwd+'/mu_force'
times=cf.extract_digits(files)
times=[str(int(time)) for time in times]
#Taking the last N configurations
n_conf=4
conf_times=times[-n_conf:]
files_analysis=cf.parameter_finder(files,conf_times)
shutil.rmtree(home,ignore_errors=True)
for i in files_analysis:
#The extraction of the parameters for the simulation comes here
path="%s/%s"%(home,times[i])
time=int(cf.extract_digits(files[i])[-1])
name=str(time)
restart=files[i]
#Creating the simulation instance
sim=simulation(home,template,name,restart)
sim.create_folder()
sim.create_qsub('test',1,1,1,'input.lmp',)
    #Modifications to the files here
file_name="input.lmp"
file_path=sim.folder+'/'+file_name
value_modify=sim.initial_conf.split('/')[-1]
cf.modify_file(file_path,'read_restart','read_restart\t%s\n'%value_modify)
| [
"[email protected]"
] | |
46bc65733acedf9596954169791412496a1c48f4 | 8b2e795c3040a2ef1d3f0c21752bec57a0614bd6 | /venv/Scripts/enhancer.py | 294eaaa77d0cdd6cb0de824a7d27a60fe56e0e2b | [] | no_license | harshit8858/NHDO | c75e244dfdc91817b3047d65c7be610f3e18aba3 | 6a5ea2de4ba607c20c0b9bd241e6b1c82090eba9 | refs/heads/master | 2023-01-06T20:18:33.795898 | 2018-01-03T07:39:04 | 2018-01-03T07:39:04 | 105,629,451 | 1 | 3 | null | 2022-12-20T22:32:34 | 2017-10-03T08:26:57 | Python | UTF-8 | Python | false | false | 1,603 | py | #!c:\users\harshi~1\nhdo\venv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk, ImageEnhance
#
# enhancer widget
class Enhance(tkinter.Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
tkinter.Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
tkinter.Label(self, image=self.tkim).pack()
# scale
s = tkinter.Scale(self, label=name, orient=tkinter.HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
self.value = float(value)
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
if len(sys.argv) != 2:
print("Usage: enhancer file")
sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(tkinter.Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(tkinter.Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(tkinter.Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
| [
"[email protected]"
] | |
48a4d700a45b7466b60ce18dc1bbe99043d5a7ed | 42a7abc31b447d1bfa5db19d5e047c475a00ca81 | /leetcode/contest/2017/mar4/531.py | 15c29bfc748d3be08dcc2797495bcce045c9fc0b | [] | no_license | jonathantsang/CompetitiveProgramming | f01f3727e49e03038a981871f29234fccfac0e7c | 05d49ca91ac2a4d414dbb38b01266962ce68f34a | refs/heads/master | 2022-12-12T11:52:13.327425 | 2022-12-07T20:37:37 | 2022-12-07T20:37:37 | 121,400,994 | 2 | 0 | null | 2020-10-08T19:24:10 | 2018-02-13T15:43:07 | Python | UTF-8 | Python | false | false | 1,229 | py | class Solution(object):
def findLonelyPixel(self, picture):
"""
:type picture: List[List[str]]
:rtype: int
"""
lonely = 0
picleng = len(picture)
row = []
column = []
for w in range(0, 501):
row.append(0)
for w in range(0, 501):
column.append(0)
## Find the number of B in the rows
for j in range(0, picleng):
leng = len(picture[j]);
for i in range(0, leng):
if(picture[j][i] == 'B'):
row[j] += 1
## Find the number of B in the columns
for j in range(0, picleng):
leng = len(picture[j]);
for i in range(0, leng):
if(picture[j][i] == 'B'):
column[i] += 1
## Go through all the rows and mark not lonely
for j in range(0, picleng):
leng = len(picture[j])
for i in range(0, leng):
## If it is a B
if('B' == picture[j][i]):
if(row[j] == 1 and column[i] == 1):
lonely += 1
return lonely
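# A small, self-contained check of the solution above (added for clarity; not part
# of the original submission). The lone 'B' at (0, 0) is lonely; the two 'B's that
# share column 1 are not.
if __name__ == "__main__":
    grid = [['B', 'W', 'W'],
            ['W', 'B', 'W'],
            ['W', 'B', 'W']]
    print(Solution().findLonelyPixel(grid))  # expected: 1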
| [
"[email protected]"
] | |
f19f8a0455cc9545e32288d7e8eefcc1050952ce | 88eb24f0890457b994e867b68e1b2d3a34a3b900 | /rv/uniform.py | ee15e496062954df619b976f1ae08b5fc1370b41 | [] | no_license | jiye-ML/Probalistic-Graphical-Model-21-Sample-Method | 9cf4168ee5cf2fb33d92236997fc03ff84a2243b | a17351de817dd340a189696592dee9ec77e49edd | refs/heads/master | 2020-05-17T08:44:24.075793 | 2019-04-26T12:56:22 | 2019-04-26T12:56:22 | 183,615,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | import numpy as np
from rv.rv import RandomVariable
class Uniform(RandomVariable):
"""
Uniform distribution
p(x|a, b)
= 1 / ((b_0 - a_0) * (b_1 - a_1)) if a <= x <= b else 0
"""
def __init__(self, low, high):
"""
construct uniform distribution
Parameters
----------
low : int, float, or np.ndarray
lower boundary
high : int, float, or np.ndarray
higher boundary
"""
super().__init__()
low = np.asarray(low)
high = np.asarray(high)
assert low.shape == high.shape
assert (low <= high).all()
self.low = low
self.high = high
self.value = 1 / np.prod(high - low)
pass
@property
def low(self):
return self.parameter["low"]
@low.setter
def low(self, low):
self.parameter["low"] = low
pass
@property
def high(self):
return self.parameter["high"]
@high.setter
def high(self, high):
self.parameter["high"] = high
pass
@property
def ndim(self):
return self.low.ndim
@property
def size(self):
return self.low.size
@property
def shape(self):
return self.low.shape
@property
def mean(self):
return 0.5 * (self.low + self.high)
def _pdf(self, X):
higher = np.logical_and.reduce(X >= self.low, 1)
lower = np.logical_and.reduce(X <= self.high, 1)
return self.value * np.logical_and(higher, lower)
def _draw(self, sample_size=1):
u01 = np.random.uniform(size=(sample_size,) + self.shape)
return u01 * (self.high - self.low) + self.low
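# --- Usage sketch (added for clarity; not part of the original module) ---
# Assumes RandomVariable.__init__ sets up the `parameter` dict used by the
# property setters above; the internal _draw()/_pdf() hooks are called directly
# so nothing else about the base-class API is assumed.
if __name__ == "__main__":
    u = Uniform(low=[0., 0.], high=[1., 2.])
    x = u._draw(5)      # five points from the box [0, 1] x [0, 2]
    print(u._pdf(x))    # 0.5 for every in-box point, since 1 / (1 * 2) = 0.5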
| [
"[email protected]"
] | |
a4a582cb04903022b261b7f8fc8ea362601afe49 | ac52ddddf672216998a33d5e6905a1a1e4d97a55 | /pipe/scripts/filter_misaligned_shared_indels.py | 549616ebf8ce5e5022ab69d39483766500b714b8 | [
"MIT"
] | permissive | EddieKHHo/megadaph | 62e29e72896a5969b21d531a20a95fbce1589c3c | 23010e7ce9ee6cceedaa3d4ba3e990e9af34aae0 | refs/heads/master | 2021-09-22T00:53:15.881183 | 2018-09-04T02:14:21 | 2018-09-04T02:14:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | #!/usr/bin/env python3
"""Remove indels which are falsely aligned as runs of single base mismatches in
other samples."""
# This script is awful
import sys
import click
from pandas import read_csv
from plumbum.cmd import rg
BASE_IDX = {
"A": [4, 8],
"T": [5, 9],
"C": [6, 10],
"G": [7, 11]
}
def is_insertion(indel):
return len(indel["REF"]) < len(indel["ALT"])
def get_ref_base(chrom, pos, pileup):
exit_code, stdout, stderr = rg.run(
["\s".join([chrom, str(pos)]), pileup],
retcode=None
)
if exit_code != 0:
return 0
return stdout.split()[2]
def check_indel(indel, pileups):
if is_insertion(indel):
bad_bases = indel["ALT"][1:]
indel_length = len(indel["ALT"])
else:
indel_length = len(indel["REF"])
bad_base_start = indel["POS"] + indel_length
bad_base_pos = range(bad_base_start, bad_base_start + indel_length)
bad_bases = [
get_ref_base(indel["CHROM"], pos, pileups[0])
for pos in bad_base_pos
]
if any([base == 0 for base in bad_bases]):
return True
adj_base = int(indel["POS"] + 1)
adj_base_pos = range(adj_base, adj_base + indel_length - 1)
for bad, adj in zip(bad_bases, adj_base_pos):
if get_ref_base(indel["CHROM"], adj, pileups[0]) != bad:
for pileup in pileups:
counts = rg.run(
["\s".join([indel["CHROM"], str(adj)]),
pileup], retcode=None
)[1].split()
if counts:
target_base_counts = (
int(counts[BASE_IDX[bad][0]]) +
int(counts[BASE_IDX[bad][1]])
)
if target_base_counts > 0:
return False
return True
def filter_indels(pileups, indels):
if len(indels):
passing = indels.apply(check_indel, axis=1, pileups=pileups)
filtered_indels = indels[passing]
else:
filtered_indels = indels
filtered_indels.to_csv(sys.stdout, sep="\t", index=False)
@click.command()
@click.option("--indels", help="TSV file containing indels.")
@click.argument("pileups", nargs=-1)
def cli(pileups, indels):
filter_indels(pileups, read_csv(indels, sep="\t"))
if __name__ == "__main__":
cli()
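# Example invocation (hypothetical file names, added for clarity):
#   ./filter_misaligned_shared_indels.py --indels candidate_indels.tsv \
#       sample_A.pileup sample_B.pileup > filtered_indels.tsv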
| [
"[email protected]"
] | |
13a2d878d94dd5ce7ae75e793523a256cbb3845e | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /Ekeopara_Praise/Phase 1/Python Basic 1/Day4 Tasks/Task 4.py | 859dab1a442dd79e17c8a15f153c9475f280c367 | [
"MIT"
] | permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 299 | py | '''4. Write a Python program to test whether a passed letter is a vowel or not.'''
letr = str(input("Enter any letter: "))
vowel = ["A", 'a', "E", 'e', "I", 'i', "O", 'o', "U", 'u']
if letr in vowel:
print("The letter entered is a vowel!")
else:
print("The letter entered is not a vowel!") | [
"[email protected]"
] | |
0ea60aa86c763bb8a7b07ee39bbe0bdd0cbcfddd | 50725a9ada0fe57fa2b49af36863eb1ce9d8c134 | /lists/forms.py | 319463e501e13dc9c5482844c30dda02536e6d03 | [] | no_license | dungnguyen1991/superlists | 1381537a2b168b6d5ea5bac23608f8f425ce642f | e3bcca1876275414d5dba1f83c482eadaff381d3 | refs/heads/master | 2023-08-28T07:09:12.855120 | 2021-10-12T09:24:21 | 2021-10-12T09:24:21 | 396,571,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | from django import forms
from django.core.exceptions import ValidationError
from lists.models import Item, List
EMPTY_ITEM_ERROR = "You can't have an empty list item"
DUPLICATE_ITEM_ERROR = "You've already got this in your list"
class ItemForm(forms.models.ModelForm):
class Meta:
model = Item
fields = ('text', )
widgets = {
'text': forms.TextInput(attrs={
'placeholder': 'Enter a to-do item',
'class': 'form-control input-lg',
})
}
error_messages = {
'text': {
'required': EMPTY_ITEM_ERROR
}
}
class ExistingListItemForm(ItemForm):
def __init__(self, for_list, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instance.list = for_list
def validate_unique(self):
try:
self.instance.validate_unique()
except ValidationError as e:
e.error_dict = {'text': [DUPLICATE_ITEM_ERROR]}
self._update_errors(e)
class NewListForm(ItemForm):
def save(self, owner):
if owner.is_authenticated:
return List.create_new(first_item_text=self.cleaned_data['text'], owner=owner)
else:
return List.create_new(first_item_text=self.cleaned_data['text']) | [
"[email protected]"
] | |
014ee15478ee543d6b6c2633912f2e2076087098 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/redis/azure-mgmt-redis/generated_samples/redis_cache_linked_server_get.py | b448a35a6f903c774db566a3add94321ac068413 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,543 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.redis import RedisManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-redis
# USAGE
python redis_cache_linked_server_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = RedisManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.linked_server.get(
resource_group_name="rg1",
name="cache1",
linked_server_name="cache2",
)
print(response)
# x-ms-original-file: specification/redis/resource-manager/Microsoft.Cache/stable/2023-04-01/examples/RedisCacheLinkedServer_Get.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
439f9f249c544c5523729e5e44bf9a0b2f7f1174 | 954c112d4805da5b1c8ba6460ae137935d85fe69 | /advanced/methods.py | 972de67fc2b7a1924eb22c1ebfe1fed93ab18985 | [] | no_license | ProsenjitKumar/DataHeadOffice | b329b96b9efa59976031483dc32beb5e09082528 | 89dc2b4fe73bc952252b190d3c64186908a026f1 | refs/heads/master | 2020-04-24T10:23:25.974884 | 2019-04-29T17:10:36 | 2019-04-29T17:10:36 | 171,892,722 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | class Employee:
def __init__(self, first_name, last_name, pay):
self.first_name = first_name
self.last_name = last_name
self.pay = pay
@classmethod
def another_stuff(cls, full_name):
cls.full_name = full_name
@classmethod
def from_string(cls, emp_str):
first, last, pay = emp_str.split('-')
return cls(first, last, pay)
@staticmethod
def is_workkday(day):
        if day.weekday() == 5 or day.weekday() == 6: # Saturday (5) or Sunday (6)
return False
return True
import datetime
my_date = datetime.date(2019, 2, 22)
print(Employee.is_workkday(my_date))
print(my_date.weekday())
# emp_obj = Employee('Prosenjit', 'Das', 26980)
# emp_obj1 = Employee('Samoli', 'Das', 5688)
# emp_obj.another_stuff('Prosenjit Das')
# print(Employee.full_name)
# print(emp_obj.full_name)
# print(emp_obj1.full_name)
# emp_str_1 = 'Prosenjit-Das-85200'
# emp_str_2 = 'Jalil-Khan-56870'
# emp_str_3 = 'Suvo-Roy-87452'
# first, last, pay = emp_str_1.split('-')
# new_emp_1 = Employee(first, last, pay)
# new_emp_1 = Employee.from_string(emp_str_1)
# print(new_emp_1.first_name)
# print(new_emp_1.pay)
| [
"[email protected]"
] | |
eb4caeb850832163e9a1b127ded1f4c34520b942 | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /Templates/0133. Clone Graph.py | c68d1c458509885aef75e70880935fb3dbd2c431 | [] | no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,897 | py | """
# Definition for a Node.
class Node(object):
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
"""
# method 2, combine node creation and relationship copy
# make sure that: 1. a node is only copied once, 2. a node's relationship is only processed once
class Solution(object):
def cloneGraph(self, node):
"""
:type node: Node
:rtype: Node
"""
if not node:
return None
d = {node: Node(node.val, [])}
visited = set() # visited means the nodes whose copies' neighbors has been copied
stack = [node]
while stack:
p = stack.pop()
if p in visited:
continue
visited.add(p)
for nei in p.neighbors:
if nei not in d:
d[nei] = Node(nei.val, [])
d[p].neighbors.append(d[nei])
stack.append(nei)
return d[node]
# method 1: separate creating node and copying relation
class Solution1(object):
def cloneGraph(self, node):
"""
:type node: Node
:rtype: Node
"""
d = {}
stack = [node]
while stack:
p = stack.pop()
if p in d:
continue
else:
d[p] = Node(p.val, [])
for nei in p.neighbors:
stack.append(nei)
stack = [node]
visited = set() # save nodes whose neighbor relationship has been cloned
while stack:
p = stack.pop()
if p in visited:
continue
else:
visited.add(p)
for nei in p.neighbors:
d[p].neighbors.append(d[nei])
stack.append(nei)
return d[node]
"""
Given a reference of a node in a connected undirected graph, return a deep copy (clone) of the graph.
Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},{"$id":"3","neighbors":[{"$ref":"2"},
{"$id":"4","neighbors":[{"$ref":"3"},{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
The number of nodes will be between 1 and 100.
The undirected graph is a simple graph, which means no repeated edges and no self-loops in the graph.
Since the graph is undirected, if node p has node q as neighbor, then node q must have node p as neighbor too.
You must return the copy of the given node as a reference to the cloned graph.
"""
| [
"[email protected]"
] | |
240ba5faf2e6c26a2582db83afe8123094b20c04 | 1515be3015ad988278d5a095416c0a0066a02757 | /src/users/models/componentsschemasmicrosoft_graph_detectedappallof1.py | 5b14ba5d58afbceee708dcfb5a655fe2b76c8442 | [
"MIT"
] | permissive | peombwa/Sample-Graph-Python-Client | 2ad494cc5b5fe026edd6ed7fee8cac2dd96aaa60 | 3396f531fbe6bb40a740767c4e31aee95a3b932e | refs/heads/master | 2020-12-29T09:50:38.941350 | 2020-02-05T22:45:28 | 2020-02-05T22:45:28 | 238,561,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphDetectedappallof1(Model):
"""detectedApp.
A managed or unmanaged app that is installed on a managed device. Unmanaged
apps will only appear for devices marked as corporate owned.
:param display_name: Name of the discovered application. Read-only
:type display_name: str
:param version: Version of the discovered application. Read-only
:type version: str
:param size_in_byte: Discovered application size in bytes. Read-only
:type size_in_byte: long
:param device_count: The number of devices that have installed this
application
:type device_count: int
:param managed_devices:
:type managed_devices: list[~users.models.MicrosoftgraphmanagedDevice]
"""
_validation = {
'device_count': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'size_in_byte': {'key': 'sizeInByte', 'type': 'long'},
'device_count': {'key': 'deviceCount', 'type': 'int'},
'managed_devices': {'key': 'managedDevices', 'type': '[MicrosoftgraphmanagedDevice]'},
}
def __init__(self, display_name=None, version=None, size_in_byte=None, device_count=None, managed_devices=None):
super(ComponentsschemasmicrosoftGraphDetectedappallof1, self).__init__()
self.display_name = display_name
self.version = version
self.size_in_byte = size_in_byte
self.device_count = device_count
self.managed_devices = managed_devices
| [
"[email protected]"
] | |
3d5ebcfd0cf48f5f261c0ae6530b42549b161e95 | 33c7a8d150f0f95f5240c1ad8b458284e4db7ae0 | /musicdl/modules/utils/logger.py | 35a600ed1b3cd771a681fbbdaeb69ddeb9c8bedf | [
"MIT"
] | permissive | Yellowhxc/musicdl | 55a81d75923f7d3cf9917aa6ef635d4ddabdd4ef | 97d6254c9427046fef5d2ef1e65297cf04397728 | refs/heads/master | 2023-01-07T04:56:26.223829 | 2020-11-03T17:37:22 | 2020-11-03T17:37:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | '''
Function:
    Some utilities for terminal printing and logging
Author:
Charles
WeChat Official Account:
Charles的皮卡丘
'''
import logging
from prettytable import PrettyTable
'''Logger class: print to the console and write messages to a log file'''
class Logger():
def __init__(self, logfilepath, **kwargs):
setattr(self, 'logfilepath', logfilepath)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
handlers=[logging.FileHandler(logfilepath),
logging.StreamHandler()])
@staticmethod
def log(level, message):
logging.log(level, message)
def debug(self, message, disable_print=False):
if disable_print:
fp = open(self.logfilepath, 'a')
fp.write(message + '\n')
else:
Logger.log(logging.DEBUG, message)
def info(self, message, disable_print=False):
if disable_print:
fp = open(self.logfilepath, 'a')
fp.write(message + '\n')
else:
Logger.log(logging.INFO, message)
def warning(self, message, disable_print=False):
if disable_print:
fp = open(self.logfilepath, 'a')
fp.write(message + '\n')
else:
Logger.log(logging.WARNING, message)
def error(self, message, disable_print=False):
if disable_print:
fp = open(self.logfilepath, 'a')
fp.write(message + '\n')
else:
Logger.log(logging.ERROR, message)
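# --- Usage sketch (added for clarity; not part of the original module) ---
# Logger writes to the given file and, unless disable_print is set, also echoes
# to the console through the logging handlers configured above.
def _logger_demo(logfilepath='musicdl_demo.log'):
    logger = Logger(logfilepath)
    logger.info('search finished')
    logger.warning('source unavailable, skipping', disable_print=True)
    return printTable(['singer', 'song'], [['Charles', 'demo']])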
'''Print a table'''
def printTable(title, items):
assert isinstance(title, list) and isinstance(items, list), 'title and items should be list in function printTable'
table = PrettyTable(title)
for item in items: table.add_row(item)
print(table)
return table | [
"[email protected]"
] | |
c633b8af692b31e91dbcff3ebf60b0db3f82e672 | 59080f5116b9e8f625b5cc849eb14b7ff9d19f3d | /088 Flask/test.py | 8999a6a4206b15516ef8d305813e52da1540fd61 | [] | no_license | yyq1609/Python_road | eda2bcd946b480a05ec31cdcb65e35b3f3e739d1 | e9ba2f47c8dd2d00a6e5ddff03c546152efd8f49 | refs/heads/master | 2020-09-11T11:51:35.903284 | 2019-11-11T13:02:21 | 2019-11-11T13:02:21 | 222,054,462 | 1 | 0 | null | 2019-11-16T05:58:13 | 2019-11-16T05:58:12 | null | UTF-8 | Python | false | false | 31 | py | print(__file__)
print(__name__) | [
"[email protected]"
] | |
589872846ac6ea51c041f6cd2f35f8715f7aa528 | d04c79e5ed09d47f306eeee2bd9ef9a1a67ef693 | /20200316/118. Pascal's Triangle.py | 16df0c80adff5ce4749bdf955b38fab3aeb9f641 | [] | no_license | mycomax0416/LeetCode | fe1d345d9b9355b37d9aa33b2633597de65a3838 | b706a57a64313ca48df9eb61cb2e08d16ddf35b1 | refs/heads/master | 2021-03-09T20:22:24.356206 | 2020-04-04T12:27:39 | 2020-04-04T12:27:39 | 246,377,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
ans = [[1]]
for n in range(numRows-1):
row = [1]
for idx in range(len(ans)-1):
row.append(ans[-1][idx]+ans[-1][idx+1])
row.append(1)
ans.append(row)
return ans | [
"[email protected]"
] | |
4d77d33b162d01b0729e4f0492e7ad90b02aa416 | 4ca8df3a127e9b15cbfecea6505928741f685a63 | /gongfei/month04/spider/爬虫滑块验证.py | 7bce8222aa6cd7fdcb4220a8bd736da8e4350889 | [] | no_license | gongfei6644/gongfei | 2beb082c56197bc23ca20a6927ff6c10d8beaa83 | bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4 | refs/heads/master | 2022-11-30T20:49:22.213040 | 2020-08-16T12:52:28 | 2020-08-16T12:52:28 | 286,283,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,385 | py | # Python爬虫滑块验证
# Slider-captcha test page: http://www.cnbaowen.net/api/geetest/
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait # waits for elements to load
from selenium.webdriver.common.action_chains import ActionChains # drag-and-drop actions
from selenium.webdriver.support import expected_conditions as EC # expected-condition helpers for waits
from selenium.common.exceptions import TimeoutException, NoSuchElementException # selenium exceptions
from selenium.webdriver.common.by import By # element locator strategies
from PIL import Image # image processing
import requests # HTTP requests
import time
import re # regular expressions
import random # random numbers
from io import BytesIO
import os
def merge_image(image_file,location_list):
    """
    Stitch the scrambled image slices back into a full image.
    :param image_file: file-like object holding the scrambled image
    :param location_list: background-position offsets of each slice
    :return: the reassembled PIL image
    """
    im = Image.open(image_file) # open the image from its binary data
    im.save('code.jpg') # keep a copy on disk as code.jpg
    new_im = Image.new('RGB',(260,116)) # blank 260x116 RGB canvas
    # cut the scrambled image into 52 small slices
    im_list_upper = [] # slices belonging to the upper half
    im_list_down = [] # slices belonging to the lower half
    # print(location_list)
    for location in location_list:
        # print(location['y'])
        if location['y'] == -58: # upper half
            # im.crop((left, upper, right, lower)) takes a 4-tuple of pixel coordinates
            im_list_upper.append(im.crop((abs(location['x']),58,abs(location['x'])+10,116)))
        if location['y'] == 0: # lower half
            # same as above; returns a PIL.Image.Image object
            im_list_down.append(im.crop((abs(location['x']),0,abs(location['x'])+10,58)))
    x_offset = 0
    for im in im_list_upper: # paste the upper half together
        new_im.paste(im,(x_offset,0)) # paste each slice onto the blank canvas; (x_offset, 0) is the top-left corner
        x_offset += im.size[0] # advance by the width of the slice
    x_offset = 0 # reset to zero and assemble the lower half the same way
    for im in im_list_down:
        new_im.paste(im,(x_offset,58))
        x_offset += im.size[0]
    # new_im.show() # display the assembled image
    return new_im # the stitched image
def get_image(driver,div_path):
    '''
    Download the scrambled image slices, stitch them together and return the full image.
    :param driver: selenium webdriver
    :param div_path: xpath of the divs holding the image slices
    :return: the reassembled PIL image
    '''
    time.sleep(2)
    background_images = driver.find_elements_by_xpath(div_path)
    location_list = []
    image_url=""
    for background_image in background_images:
        location = {}
        result = re.findall('background-image: url\("(.*?)"\); background-position: (.*?)px (.*?)px;',background_image.get_attribute('style')) #
        # print(result)
        location['x'] = int(result[0][1]) # x offset of this slice
        location['y'] = int(result[0][2]) # y offset of this slice
        image_url = result[0][0].replace('webp','jpg') # image URL
        location_list.append(location) # collect the offsets as dicts, e.g. {"x":"-157","y":"-58"}
    print('==================================')
    # rewritten URL, e.g. http://static.geetest.com/pictures/gt/579066de6/579066de6.webp
    # .content is the response body as raw bytes
    image_result = requests.get(image_url).content # e.g. b'\xff...'
    # BytesIO behaves like an in-memory file, equivalent to:
    # with open('1.jpg','wb') as f:
    #     f.write(image_result)
    image_file = BytesIO(image_result) # file-like object wrapping the scrambled image bytes
    image = merge_image(image_file,location_list) # stitch the slices, e.g. <PIL.Image.Image mode=RGB size=260x116>
    return image
def get_track(distance):
    '''
    Build a movement track that imitates a human drag: accelerate first, then decelerate.
    Basic formulas for uniformly accelerated motion:
        (1) v = v0 + a*t
        (2) s = v0*t + (1/2)*a*t**2
        (3) v**2 - v0**2 = 2*a*s
    :param distance: total distance the slider needs to move
    :return: list of displacements, one per 0.2 s step
    '''
    # initial velocity
    v=0
    # use 0.2 s as the unit time step; each track entry is the displacement within 0.2 s
    t=0.2
    # displacement/track list; each element is the movement within one 0.2 s step
    tracks=[]
    # current displacement
    current=0
    accuracy_distance=distance
    # start decelerating once 3/5 of the target distance has been covered
    mid=distance * 3/5
    # distance += 20  # optionally overshoot a little and slide back at the end
    # a = random.randint(1,3)
    while current < distance:
        if current < mid:
            # a smaller acceleration gives smaller per-step displacements, i.e. a finer, more detailed track
            a = random.randint(2,4) # acceleration while speeding up
        else:
            a = -random.randint(2,4) # acceleration (negative) while slowing down
        # initial velocity for this step
        v0 = v
        # displacement within this 0.2 s step
        s = v0*t+0.5*a*(t**2) # s = v0*t + (1/2)*a*t**2
        # current position
        current += s
        # append to the track list
        print(a)
        tracks.append(round(s)) # record this step's x displacement
        # the end velocity of this step becomes the initial velocity of the next step
        v= v0+a*t
    # slide back to roughly the exact position
    if abs(current - distance) > 1:
        s = -(current - distance - 1)
        tracks.append(round(s)) # record the correction step
    print(current,"<><><>",distance)
    # for i in range(4):
    #     tracks.append(-random.randint(1,3))
    return tracks # the list of per-step displacements
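# --- Illustrative helper (added for clarity; not part of the original script) ---
# A quick, selenium-free way to see what get_track() produces: the summed
# displacements should land within about a pixel of the requested distance.
def _demo_track(distance=182):
    trk = get_track(distance)
    return trk, sum(trk)  # e.g. ([1, 2, 4, ..., -3], ~182)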
def get_distance(image1,image2):
    '''
    Work out how far the slider captcha needs to be moved.
    :param image1: image object without the gap
    :param image2: image object with the gap
    :return: the distance the slider must travel
    '''
    # print('size', image1.size)
    threshold = 50 # allowed per-channel RGB difference
    for i in range(0,image1.size[0]): # x coordinates, 0..260
        for j in range(0,image1.size[1]): # y coordinates, 0..116
            pixel1 = image1.getpixel((i,j)) # returns a pixel tuple such as (255, 101, 86)
            pixel2 = image2.getpixel((i,j)) # e.g. (255, 101, 86)
            res_R = abs(pixel1[0]-pixel2[0]) # R channel difference
            res_G = abs(pixel1[1] - pixel2[1]) # G channel difference
            res_B = abs(pixel1[2] - pixel2[2]) # B channel difference
            if res_R > threshold and res_G > threshold and res_B > threshold:
                # the first pixel whose colour differs by more than 50 in every channel marks the gap;
                # since the slider always starts at x = 0, this x coordinate is the distance to move
                return i # the distance to move
def main_check_code(driver, element):
    """
    Drag the slider to solve the captcha.
    :param driver: selenium webdriver
    :param element: the slider knob element
    :return:
    """
    image1 = get_image(driver, '//div[@class="gt_cut_bg gt_show"]/div') # stitched image with the gap
    image2 = get_image(driver, '//div[@class="gt_cut_fullbg gt_show"]/div') # stitched complete image
    # x coordinate of the gap in the image
    # 2. compare every RGB pixel of the two images; the first differing x value is the distance to move
    l = get_distance(image1, image2) # pixel distance, e.g. 182
    print('l=',l)
    # 3. build the movement track
    track_list = get_track(l) # imitate a human drag: accelerate, then decelerate
    print('Step 1: press and hold the slider button')
    # ActionChains drives user interactions; click_and_hold presses the left mouse button on the element, perform() executes it
    ActionChains(driver).click_and_hold(on_element=element).perform() # press the left mouse button and keep it held
    time.sleep(0.3)
    print('Step 2: drag the element')
    for track in track_list:
        # move_by_offset moves the mouse relative to its current position; xoffset is the x axis, yoffset the y axis
        ActionChains(driver).move_by_offset(xoffset=track, yoffset=0).perform() # move the mouse by (x, y) from the current position
        time.sleep(0.003)
    # if l>100:
    ActionChains(driver).move_by_offset(xoffset=-random.randint(2,5), yoffset=0).perform()
    time.sleep(0.3)
    print('Step 3: release the mouse button')
    # release the held mouse button; on_element is the element over which to release
    ActionChains(driver).release(on_element=element).perform()
    time.sleep(5)
def main_check_slider(driver):
    """
    Wait until the slider button has loaded on the page.
    :param driver: selenium webdriver
    :return: the slider knob element
    """
    while True:
        try:
            driver.get('http://www.cnbaowen.net/api/geetest/')
            element = WebDriverWait(driver, 30, 0.5).until(EC.element_to_be_clickable((By.CLASS_NAME, 'gt_slider_knob')))
            if element:
                return element
        except TimeoutException as e:
            print('Timed out, retrying')
            time.sleep(5)
if __name__ == '__main__':
    count = 6 # attempt the captcha at most 6 times
    chrome_path = os.path.join(os.path.dirname(__file__), "chromedriver.exe") # build the chromedriver path
    driver = webdriver.Chrome(executable_path=chrome_path) # instantiate Chrome
    try:
        # wait for the slider button to finish loading
        element = main_check_slider(driver) # returns the slider knob element once the page is ready
        while count > 0:
            main_check_code(driver,element) # attempt the slider verification
            time.sleep(2)
            try:
                success_element = (By.CSS_SELECTOR, '.gt_holder .gt_ajax_tip.gt_success')
                # look for the success marker
                print('suc=',driver.find_element_by_css_selector('.gt_holder .gt_ajax_tip.gt_success'))
                # wait up to 20 s for the success element to appear
                success_images = WebDriverWait(driver, 20).until(EC.presence_of_element_located(success_element))
                if success_images: # present and non-empty
                    print('Captcha solved successfully!')
                    count = 0
                    # after verification the page moves on automatically (or add a click); hook your own scraping/parsing functions in from here
                    break
            except NoSuchElementException as e:
                print('Recognition failed, retrying')
                count -= 1
                time.sleep(2)
        else:
            print('too many attempts to solve the captcha')
            exit('Exiting the program')
    finally:
        driver.close()
| [
"1"
] | 1 |
79f634ddc2c7a0378b19100eb808178af2628c13 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/pandas/tests/test_config.py | 54db3887850ea2b768f3ff942424c6530c69a3d8 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 16,196 | py | # -*- coding: utf-8 -*-
import warnings
import pytest
import pandas as pd
class TestConfig(object):
@classmethod
def setup_class(cls):
from copy import deepcopy
cls.cf = pd.core.config
cls.gc = deepcopy(getattr(cls.cf, '_global_config'))
cls.do = deepcopy(getattr(cls.cf, '_deprecated_options'))
cls.ro = deepcopy(getattr(cls.cf, '_registered_options'))
def setup_method(self, method):
setattr(self.cf, '_global_config', {})
setattr(self.cf, 'options', self.cf.DictWrapper(
self.cf._global_config))
setattr(self.cf, '_deprecated_options', {})
setattr(self.cf, '_registered_options', {})
# Our test fixture in conftest.py sets "chained_assignment"
# to "raise" only after all test methods have been setup.
# However, after this setup, there is no longer any
# "chained_assignment" option, so re-register it.
self.cf.register_option('chained_assignment', 'raise')
def teardown_method(self, method):
setattr(self.cf, '_global_config', self.gc)
setattr(self.cf, '_deprecated_options', self.do)
setattr(self.cf, '_registered_options', self.ro)
def test_api(self):
# the pandas object exposes the user API
assert hasattr(pd, 'get_option')
assert hasattr(pd, 'set_option')
assert hasattr(pd, 'reset_option')
assert hasattr(pd, 'describe_option')
def test_is_one_of_factory(self):
v = self.cf.is_one_of_factory([None, 12])
v(12)
v(None)
pytest.raises(ValueError, v, 1.1)
def test_register_option(self):
self.cf.register_option('a', 1, 'doc')
# can't register an already registered option
pytest.raises(KeyError, self.cf.register_option, 'a', 1, 'doc')
# can't register an already registered option
pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d1', 1,
'doc')
pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d2', 1,
'doc')
# no python keywords
pytest.raises(ValueError, self.cf.register_option, 'for', 0)
pytest.raises(ValueError, self.cf.register_option, 'a.for.b', 0)
# must be valid identifier (ensure attribute access works)
pytest.raises(ValueError, self.cf.register_option,
'Oh my Goddess!', 0)
# we can register options several levels deep
# without predefining the intermediate steps
# and we can define differently named options
# in the same namespace
self.cf.register_option('k.b.c.d1', 1, 'doc')
self.cf.register_option('k.b.c.d2', 1, 'doc')
def test_describe_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b', 1, 'doc2')
self.cf.deprecate_option('b')
self.cf.register_option('c.d.e1', 1, 'doc3')
self.cf.register_option('c.d.e2', 1, 'doc4')
self.cf.register_option('f', 1)
self.cf.register_option('g.h', 1)
self.cf.register_option('k', 2)
self.cf.deprecate_option('g.h', rkey="k")
self.cf.register_option('l', "foo")
# non-existent keys raise KeyError
pytest.raises(KeyError, self.cf.describe_option, 'no.such.key')
# we can get the description for any key we registered
assert 'doc' in self.cf.describe_option('a', _print_desc=False)
assert 'doc2' in self.cf.describe_option('b', _print_desc=False)
assert 'precated' in self.cf.describe_option('b', _print_desc=False)
assert 'doc3' in self.cf.describe_option('c.d.e1', _print_desc=False)
assert 'doc4' in self.cf.describe_option('c.d.e2', _print_desc=False)
# if no doc is specified we get a default message
# saying "description not available"
assert 'vailable' in self.cf.describe_option('f', _print_desc=False)
assert 'vailable' in self.cf.describe_option('g.h', _print_desc=False)
assert 'precated' in self.cf.describe_option('g.h', _print_desc=False)
assert 'k' in self.cf.describe_option('g.h', _print_desc=False)
# default is reported
assert 'foo' in self.cf.describe_option('l', _print_desc=False)
# current value is reported
assert 'bar' not in self.cf.describe_option('l', _print_desc=False)
self.cf.set_option("l", "bar")
assert 'bar' in self.cf.describe_option('l', _print_desc=False)
def test_case_insensitive(self):
self.cf.register_option('KanBAN', 1, 'doc')
assert 'doc' in self.cf.describe_option('kanbaN', _print_desc=False)
assert self.cf.get_option('kanBaN') == 1
self.cf.set_option('KanBan', 2)
assert self.cf.get_option('kAnBaN') == 2
# gets of non-existent keys fail
pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
self.cf.deprecate_option('KanBan')
assert self.cf._is_deprecated('kAnBaN')
def test_get_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
# gets of existing keys succeed
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
assert self.cf.get_option('b.b') is None
# gets of non-existent keys fail
pytest.raises(KeyError, self.cf.get_option, 'no_such_option')
def test_set_option(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
assert self.cf.get_option('b.b') is None
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
self.cf.set_option('b.b', 1.1)
assert self.cf.get_option('a') == 2
assert self.cf.get_option('b.c') == 'wurld'
assert self.cf.get_option('b.b') == 1.1
pytest.raises(KeyError, self.cf.set_option, 'no.such.key', None)
def test_set_option_empty_args(self):
pytest.raises(ValueError, self.cf.set_option)
def test_set_option_uneven_args(self):
pytest.raises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c')
def test_set_option_invalid_single_argument_type(self):
pytest.raises(ValueError, self.cf.set_option, 2)
def test_set_option_multiple(self):
self.cf.register_option('a', 1, 'doc')
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('b.b', None, 'doc2')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
assert self.cf.get_option('b.b') is None
self.cf.set_option('a', '2', 'b.c', None, 'b.b', 10.0)
assert self.cf.get_option('a') == '2'
assert self.cf.get_option('b.c') is None
assert self.cf.get_option('b.b') == 10.0
def test_validation(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_text)
pytest.raises(ValueError, self.cf.register_option, 'a.b.c.d2',
'NO', 'doc', validator=self.cf.is_int)
self.cf.set_option('a', 2) # int is_int
self.cf.set_option('b.c', 'wurld') # str is_str
pytest.raises(
ValueError, self.cf.set_option, 'a', None) # None not is_int
pytest.raises(ValueError, self.cf.set_option, 'a', 'ab')
pytest.raises(ValueError, self.cf.set_option, 'b.c', 1)
validator = self.cf.is_one_of_factory([None, self.cf.is_callable])
self.cf.register_option('b', lambda: None, 'doc',
validator=validator)
self.cf.set_option('b', '%.1f'.format) # Formatter is callable
self.cf.set_option('b', None) # Formatter is none (default)
pytest.raises(ValueError, self.cf.set_option, 'b', '%.1f')
def test_reset_option(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
assert self.cf.get_option('a') == 2
assert self.cf.get_option('b.c') == 'wurld'
self.cf.reset_option('a')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'wurld'
self.cf.reset_option('b.c')
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
def test_reset_option_all(self):
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2',
validator=self.cf.is_str)
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
self.cf.set_option('a', 2)
self.cf.set_option('b.c', 'wurld')
assert self.cf.get_option('a') == 2
assert self.cf.get_option('b.c') == 'wurld'
self.cf.reset_option("all")
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b.c') == 'hullo'
def test_deprecate_option(self):
# we can deprecate non-existent options
self.cf.deprecate_option('foo')
assert self.cf._is_deprecated('foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
with pytest.raises(
KeyError,
match="No such keys.s.: 'foo'"):
self.cf.get_option('foo')
assert len(w) == 1 # should have raised one warning
assert 'deprecated' in str(w[-1]) # we get the default message
self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int)
self.cf.register_option('b.c', 'hullo', 'doc2')
self.cf.register_option('foo', 'hullo', 'doc2')
self.cf.deprecate_option('a', removal_ver='nifty_ver')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('a')
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the default message
assert 'nifty_ver' in str(w[-1]) # with the removal_ver quoted
pytest.raises(
KeyError, self.cf.deprecate_option, 'a') # can't depr. twice
self.cf.deprecate_option('b.c', 'zounds!')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.get_option('b.c')
assert len(w) == 1 # should have raised one warning
assert 'zounds!' in str(w[-1]) # we get the custom message
# test rerouting keys
self.cf.register_option('d.a', 'foo', 'doc2')
self.cf.register_option('d.dep', 'bar', 'doc2')
assert self.cf.get_option('d.a') == 'foo'
assert self.cf.get_option('d.dep') == 'bar'
self.cf.deprecate_option('d.dep', rkey='d.a') # reroute d.dep to d.a
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.cf.get_option('d.dep') == 'foo'
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.cf.set_option('d.dep', 'baz') # should overwrite "d.a"
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the custom message
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.cf.get_option('d.dep') == 'baz'
assert len(w) == 1 # should have raised one warning
assert 'eprecated' in str(w[-1]) # we get the custom message
def test_config_prefix(self):
with self.cf.config_prefix("base"):
self.cf.register_option('a', 1, "doc1")
self.cf.register_option('b', 2, "doc2")
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b') == 2
self.cf.set_option('a', 3)
self.cf.set_option('b', 4)
assert self.cf.get_option('a') == 3
assert self.cf.get_option('b') == 4
assert self.cf.get_option('base.a') == 3
assert self.cf.get_option('base.b') == 4
assert 'doc1' in self.cf.describe_option('base.a', _print_desc=False)
assert 'doc2' in self.cf.describe_option('base.b', _print_desc=False)
self.cf.reset_option('base.a')
self.cf.reset_option('base.b')
with self.cf.config_prefix("base"):
assert self.cf.get_option('a') == 1
assert self.cf.get_option('b') == 2
def test_callback(self):
k = [None]
v = [None]
def callback(key):
k.append(key)
v.append(self.cf.get_option(key))
self.cf.register_option('d.a', 'foo', cb=callback)
self.cf.register_option('d.b', 'foo', cb=callback)
del k[-1], v[-1]
self.cf.set_option("d.a", "fooz")
assert k[-1] == "d.a"
assert v[-1] == "fooz"
del k[-1], v[-1]
self.cf.set_option("d.b", "boo")
assert k[-1] == "d.b"
assert v[-1] == "boo"
del k[-1], v[-1]
self.cf.reset_option("d.b")
assert k[-1] == "d.b"
def test_set_ContextManager(self):
def eq(val):
assert self.cf.get_option("a") == val
self.cf.register_option('a', 0)
eq(0)
with self.cf.option_context("a", 15):
eq(15)
with self.cf.option_context("a", 25):
eq(25)
eq(15)
eq(0)
self.cf.set_option("a", 17)
eq(17)
def test_attribute_access(self):
holder = []
def f():
options.b = 1
def f2():
options.display = 1
def f3(key):
holder.append(True)
self.cf.register_option('a', 0)
self.cf.register_option('c', 0, cb=f3)
options = self.cf.options
assert options.a == 0
with self.cf.option_context("a", 15):
assert options.a == 15
options.a = 500
assert self.cf.get_option("a") == 500
self.cf.reset_option("a")
assert options.a == self.cf.get_option("a", 0)
pytest.raises(KeyError, f)
pytest.raises(KeyError, f2)
# make sure callback kicks when using this form of setting
options.c = 1
assert len(holder) == 1
def test_option_context_scope(self):
# Ensure that creating a context does not affect the existing
# environment as it is supposed to be used with the `with` statement.
# See https://github.com/pandas-dev/pandas/issues/8514
original_value = 60
context_value = 10
option_name = 'a'
self.cf.register_option(option_name, original_value)
# Ensure creating contexts didn't affect the current context.
ctx = self.cf.option_context(option_name, context_value)
assert self.cf.get_option(option_name) == original_value
# Ensure the correct value is available inside the context.
with ctx:
assert self.cf.get_option(option_name) == context_value
# Ensure the current context is reset
assert self.cf.get_option(option_name) == original_value
def test_dictwrapper_getattr(self):
options = self.cf.options
# GH 19789
pytest.raises(self.cf.OptionError, getattr, options, 'bananas')
assert not hasattr(options, 'bananas')
| [
"[email protected]"
] | |
2eb9bf182ce1419f171b1e57e534ce9d199b59c2 | f138cfdc2f488100074d946a059f0967d76f4a70 | /tests/example/settings.py | 5331be406d20d662be7b18c66763d0753f8bd900 | [
"MIT"
] | permissive | davecap/django-subdomains | 7677a5a31ac6cf8d22391997288821af83f4d4eb | d595959a8bce8ff9605c42f367c02a91340e9a05 | refs/heads/master | 2021-01-16T20:28:55.738879 | 2012-07-04T19:56:43 | 2012-07-04T19:56:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'subdomains.middleware.SubdomainURLRoutingMiddleware',
)
ROOT_URLCONF = 'example.urls.application'
SUBDOMAIN_URLCONFS = {
None: 'example.urls.marketing',
'api': 'example.urls.api',
'www': 'example.urls.marketing',
}
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
INSTALLED_APPS = (
'django.contrib.sites',
'example',
'subdomains',
)
| [
"[email protected]"
] | |
d73bd6fb3edc4500c6a7773789366ce45328e797 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/web/v20200601/static_site.py | 63de03a3ff2b659e6b9332781a9646196d9c719a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,883 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StaticSiteArgs', 'StaticSite']
@pulumi.input_type
class StaticSiteArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
branch: Optional[pulumi.Input[str]] = None,
build_properties: Optional[pulumi.Input['StaticSiteBuildPropertiesArgs']] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
repository_token: Optional[pulumi.Input[str]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuDescriptionArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a StaticSite resource.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] branch: The target branch in the repository.
:param pulumi.Input['StaticSiteBuildPropertiesArgs'] build_properties: Build properties to configure on the repository.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input[str] name: Name of the static site to create or update.
:param pulumi.Input[str] repository_token: A user's github repository token. This is used to setup the Github Actions workflow file and API secrets.
:param pulumi.Input[str] repository_url: URL for the repository of the static site.
:param pulumi.Input['SkuDescriptionArgs'] sku: Description of a SKU for a scalable resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if branch is not None:
pulumi.set(__self__, "branch", branch)
if build_properties is not None:
pulumi.set(__self__, "build_properties", build_properties)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if repository_token is not None:
pulumi.set(__self__, "repository_token", repository_token)
if repository_url is not None:
pulumi.set(__self__, "repository_url", repository_url)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def branch(self) -> Optional[pulumi.Input[str]]:
"""
The target branch in the repository.
"""
return pulumi.get(self, "branch")
@branch.setter
def branch(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "branch", value)
@property
@pulumi.getter(name="buildProperties")
def build_properties(self) -> Optional[pulumi.Input['StaticSiteBuildPropertiesArgs']]:
"""
Build properties to configure on the repository.
"""
return pulumi.get(self, "build_properties")
@build_properties.setter
def build_properties(self, value: Optional[pulumi.Input['StaticSiteBuildPropertiesArgs']]):
pulumi.set(self, "build_properties", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the static site to create or update.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="repositoryToken")
def repository_token(self) -> Optional[pulumi.Input[str]]:
"""
A user's github repository token. This is used to setup the Github Actions workflow file and API secrets.
"""
return pulumi.get(self, "repository_token")
@repository_token.setter
def repository_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository_token", value)
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> Optional[pulumi.Input[str]]:
"""
URL for the repository of the static site.
"""
return pulumi.get(self, "repository_url")
@repository_url.setter
def repository_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "repository_url", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuDescriptionArgs']]:
"""
Description of a SKU for a scalable resource.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuDescriptionArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class StaticSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
branch: Optional[pulumi.Input[str]] = None,
build_properties: Optional[pulumi.Input[pulumi.InputType['StaticSiteBuildPropertiesArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
repository_token: Optional[pulumi.Input[str]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuDescriptionArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Static Site ARM resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] branch: The target branch in the repository.
:param pulumi.Input[pulumi.InputType['StaticSiteBuildPropertiesArgs']] build_properties: Build properties to configure on the repository.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] location: Resource Location.
:param pulumi.Input[str] name: Name of the static site to create or update.
:param pulumi.Input[str] repository_token: A user's github repository token. This is used to setup the Github Actions workflow file and API secrets.
:param pulumi.Input[str] repository_url: URL for the repository of the static site.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[pulumi.InputType['SkuDescriptionArgs']] sku: Description of a SKU for a scalable resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StaticSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Static Site ARM resource.
:param str resource_name: The name of the resource.
:param StaticSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StaticSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
branch: Optional[pulumi.Input[str]] = None,
build_properties: Optional[pulumi.Input[pulumi.InputType['StaticSiteBuildPropertiesArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
repository_token: Optional[pulumi.Input[str]] = None,
repository_url: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuDescriptionArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StaticSiteArgs.__new__(StaticSiteArgs)
__props__.__dict__["branch"] = branch
__props__.__dict__["build_properties"] = build_properties
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["repository_token"] = repository_token
__props__.__dict__["repository_url"] = repository_url
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["custom_domains"] = None
__props__.__dict__["default_hostname"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:web:StaticSite"), pulumi.Alias(type_="azure-native:web/v20190801:StaticSite"), pulumi.Alias(type_="azure-native:web/v20200901:StaticSite"), pulumi.Alias(type_="azure-native:web/v20201001:StaticSite"), pulumi.Alias(type_="azure-native:web/v20201201:StaticSite"), pulumi.Alias(type_="azure-native:web/v20210101:StaticSite"), pulumi.Alias(type_="azure-native:web/v20210115:StaticSite"), pulumi.Alias(type_="azure-native:web/v20210201:StaticSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StaticSite, __self__).__init__(
'azure-native:web/v20200601:StaticSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StaticSite':
"""
Get an existing StaticSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StaticSiteArgs.__new__(StaticSiteArgs)
__props__.__dict__["branch"] = None
__props__.__dict__["build_properties"] = None
__props__.__dict__["custom_domains"] = None
__props__.__dict__["default_hostname"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["repository_token"] = None
__props__.__dict__["repository_url"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return StaticSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def branch(self) -> pulumi.Output[Optional[str]]:
"""
The target branch in the repository.
"""
return pulumi.get(self, "branch")
@property
@pulumi.getter(name="buildProperties")
def build_properties(self) -> pulumi.Output[Optional['outputs.StaticSiteBuildPropertiesResponse']]:
"""
Build properties to configure on the repository.
"""
return pulumi.get(self, "build_properties")
@property
@pulumi.getter(name="customDomains")
def custom_domains(self) -> pulumi.Output[Sequence[str]]:
"""
The custom domains associated with this static site.
"""
return pulumi.get(self, "custom_domains")
@property
@pulumi.getter(name="defaultHostname")
def default_hostname(self) -> pulumi.Output[str]:
"""
The default autogenerated hostname for the static site.
"""
return pulumi.get(self, "default_hostname")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="repositoryToken")
def repository_token(self) -> pulumi.Output[Optional[str]]:
"""
        A user's GitHub repository token. This is used to set up the GitHub Actions workflow file and API secrets.
"""
return pulumi.get(self, "repository_token")
@property
@pulumi.getter(name="repositoryUrl")
def repository_url(self) -> pulumi.Output[Optional[str]]:
"""
URL for the repository of the static site.
"""
return pulumi.get(self, "repository_url")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuDescriptionResponse']]:
"""
Description of a SKU for a scalable resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
877058f526d932f0aac12ea9786b22eef88a0462 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.3/tests/regressiontests/cache/tests.py | dabbff3c4ad8c6f02e786633ef6625d346dfad58 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.3/tests/regressiontests/cache/tests.py | [
"[email protected]"
] | |
07ffc5e871f981299be97b62551c7b294f59e64a | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Default/FPythonCode/interestRateSpreadDeltaCurveBucketsShift.py | 5440ba998dea1a913984998dd3f6f72075eb13f5 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,483 | py | import acm
def CreateBuckets(validSpreadCurve):
buckets = acm.FArray()
for point in validSpreadCurve.Points():
bucketDef = acm.FFixedDateTimeBucketDefinition()
date = point.ActualDate()
bucketDef.FixedDate( date )
bucketDef.DiscardIfExpired( True )
if(point.Instrument()):
bucketDef.Name(point.Name())
elif(not point.Date()):
period = point.DatePeriod()
bucketDef.Name(acm.Time().DatePeriodToString(period))
buckets.Add(bucketDef)
if buckets.Size():
        return buckets
else:
return None
yieldCurveTypeEnum = acm.GetDomain('enum(IrType)')
q = acm.CreateFASQLQuery(acm.FYieldCurve, 'AND')
yieldCurveType = q.AddOpNode('OR')
yieldCurveType.AddAttrNode('Type', 'EQUAL', yieldCurveTypeEnum.Enumeration('Spread'))
yieldCurveType.AddAttrNode('Type', 'EQUAL', yieldCurveTypeEnum.Enumeration('Attribute Spread'))
validSpreadCurves = q.Select()
ael_variables = [
['Base Value', 'Base Value', 'string', acm.GetDomain('EnumRiskBaseCalculation').Enumerators(), None, 1, 0, 'Determines if Theoretical TPL or Theoretical Value (default) is used as the base for curve shifts. Different results can be arrived at if the ThTPL column includes Cash values sensitive to curves (for example via Exact FX conversions).', None, 1],
['Yield Curve', 'Yield Curve', 'FYieldCurve', validSpreadCurves, None, 1, 0, 'The attribute spread curve that will be shifted in buckets.', None, 1]
]
def ael_custom_label( parameters, dictExtra ):
label = parameters.At('Yield Curve').Name()
if parameters.At('Base Value'):
label += ", Including Cash"
return label
def ael_main_ex(parameters, dictExtra):
validSpreadCurve = parameters['Yield Curve']
baseValue = parameters['Base Value']
buckets = CreateBuckets(validSpreadCurve)
if not buckets:
return 0
resultVector = []
timeBuckets = acm.Time.CreateTimeBucketsFromDefinitions(0, buckets, None, 0, True, False, False, False, False)
for idx, bucket in enumerate(timeBuckets):
params = acm.FNamedParameters()
if idx == 0:
params.AddParameter('baseValueChoice', baseValue)
params.AddParameter('buckets', timeBuckets)
params.AddParameter('yieldCurve', validSpreadCurve)
params.Name(bucket.Name())
params.UniqueTag(bucket.Spec())
resultVector.append(params)
return resultVector
| [
"[email protected]"
] | |
02976528d092c165749236583b260d77148e8d5c | 6cbc44e497be77774c62b0d894bec03218b3b9c1 | /utils/custom_context_processors.py | 02a15c775eb2b8683d6969e39dbb424730aa982f | [
"LicenseRef-scancode-public-domain"
] | permissive | usnistgov/InternationalMetrologyResourceRegistry | 416263c8775bd70f27d8d7892f6342a7c66f7adf | d1eaee864727466c0e62f7ed2fafa034ce17ddee | refs/heads/master | 2021-01-17T19:21:07.832355 | 2016-09-23T21:33:50 | 2016-09-23T21:33:50 | 60,211,427 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | from django.conf import settings
def domain_context_processor(request):
return {
'CUSTOM_TITLE': settings.CUSTOM_TITLE if hasattr(settings, 'CUSTOM_TITLE') else '',
'CUSTOM_ORGANIZATION': settings.CUSTOM_ORGANIZATION if hasattr(settings, 'CUSTOM_ORGANIZATION') else '',
'CUSTOM_NAME': settings.CUSTOM_NAME if hasattr(settings, 'CUSTOM_NAME') else '',
'CUSTOM_SUBTITLE': settings.CUSTOM_SUBTITLE if hasattr(settings, 'CUSTOM_SUBTITLE') else '',
'CUSTOM_DATA': settings.CUSTOM_DATA if hasattr(settings, 'CUSTOM_DATA') else '',
'CUSTOM_CURATE': settings.CUSTOM_CURATE if hasattr(settings, 'CUSTOM_CURATE') else '',
'CUSTOM_EXPLORE': settings.CUSTOM_EXPLORE if hasattr(settings, 'CUSTOM_EXPLORE') else '',
'CUSTOM_COMPOSE': settings.CUSTOM_COMPOSE if hasattr(settings, 'CUSTOM_COMPOSE') else '',
'CUSTOM_URL': settings.CUSTOM_URL if hasattr(settings, 'CUSTOM_URL') else '',
} | [
"[email protected]"
] | |
aee8ed2b1303f7ec53448b742aac1467ec30e201 | eed3d7d9dcf5804d602a1acb32d535e2f49d3324 | /2018-07-01_Valid-Parentheses/solution.py | 8aece5a21d9de72a16fb7a524bcc5bdcba662466 | [] | no_license | ansonmiu0214/algorithms | e928b8a932ca3050a3c6fd5d07fae6df3fd7c5c4 | f35efb3536186dcd672c9aa91856d8d9213b1b82 | refs/heads/master | 2020-03-21T05:18:12.768425 | 2019-03-24T12:30:24 | 2019-03-24T12:30:24 | 138,154,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | #!/bin/python3
from collections import deque
"""
Stack-based approach to keep track of bracket depth in LIFO.
Running time complexity of O(n).
"""
def isValid(s):
brackets = { '(' : ')', '[': ']', '{': '}' }
stack = deque()
count = 0
for letter in s:
if letter in brackets:
stack.append(letter) # keep track of open brackets
count += 1
else:
if count == 0:
return False # not expecting closing
open_bracket = stack.pop()
count -= 1
if brackets[open_bracket] != letter:
return False # not the closing expected
return count == 0 # stack should be empty now
if __name__ == "__main__":
print("Enter bracket pattern: ", end="")
s = input().strip()
print("Pattern '{}' is {}.".format(s, "valid" if isValid(s) else "not valid"))
| [
"[email protected]"
] | |
a40e9ab681bab08ec937fc638b267119a9bf37bc | 350d6b7246d6ef8161bdfccfb565b8671cc4d701 | /Last Stone Weight.py | ba9f5c8adb1c5b151887f749b6ed3c70aee94f06 | [] | no_license | YihaoGuo2018/leetcode_python_2 | 145d5fbe7711c51752b2ab47a057b37071d2fbf7 | 2065355198fd882ab90bac6041c1d92d1aff5c65 | refs/heads/main | 2023-02-14T14:25:58.457991 | 2021-01-14T15:57:10 | 2021-01-14T15:57:10 | 329,661,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | class Solution(object):
def lastStoneWeight(self, A):
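        import heapq  # min-heap; weights are negated below so it behaves as a max-heap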
pq = [-x for x in A]
heapq.heapify(pq)
for i in xrange(len(A) - 1):
x, y = -heapq.heappop(pq), -heapq.heappop(pq)
heapq.heappush(pq, -abs(x - y))
return -pq[0] | [
"[email protected]"
] | |
56ff8e02d953cdf69263eb9a3ecb20990afd092d | 2b3ea7bb0df4be7f55d2ac188e23d801e497df8d | /fcsm_eos_api_client/models/vmware_availability_zone.py | ff4e4ce3c2bc04d8cce251739b49b4d377799d7d | [] | no_license | mikespub/fcsm-eos-api-client | 12b663b4e79ac5d86c2162dec168bfa240a85f0c | 107a3a7733c55ae6a750e32497268300c6be590e | refs/heads/master | 2020-08-01T18:13:17.229375 | 2019-10-29T14:30:56 | 2019-10-29T14:30:56 | 211,071,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,713 | py | # coding: utf-8
"""
Combined FCSM EOS API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VmwareAvailabilityZone(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available': 'bool',
'id': 'str',
'name': 'str'
}
attribute_map = {
'available': 'available',
'id': 'id',
'name': 'name'
}
def __init__(self, available=None, id=None, name=None): # noqa: E501
"""VmwareAvailabilityZone - a model defined in OpenAPI""" # noqa: E501
self._available = None
self._id = None
self._name = None
self.discriminator = None
self.available = available
self.id = id
self.name = name
@property
def available(self):
"""Gets the available of this VmwareAvailabilityZone. # noqa: E501
Determines whether the availability zone is available for use # noqa: E501
:return: The available of this VmwareAvailabilityZone. # noqa: E501
:rtype: bool
"""
return self._available
@available.setter
def available(self, available):
"""Sets the available of this VmwareAvailabilityZone.
Determines whether the availability zone is available for use # noqa: E501
:param available: The available of this VmwareAvailabilityZone. # noqa: E501
:type: bool
"""
if available is None:
raise ValueError("Invalid value for `available`, must not be `None`") # noqa: E501
self._available = available
@property
def id(self):
"""Gets the id of this VmwareAvailabilityZone. # noqa: E501
:return: The id of this VmwareAvailabilityZone. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VmwareAvailabilityZone.
:param id: The id of this VmwareAvailabilityZone. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this VmwareAvailabilityZone. # noqa: E501
:return: The name of this VmwareAvailabilityZone. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VmwareAvailabilityZone.
:param name: The name of this VmwareAvailabilityZone. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VmwareAvailabilityZone):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
5fed848b6244c2b76b7dd85998ccef56a6a34f40 | 2eae961147a9627a2b9c8449fa61cb7292ad4f6a | /openapi_client/api/tax_types_api.py | 7130750124302e90fd34997b17a2be912471ade6 | [] | no_license | kgr-eureka/SageOneSDK | 5a57cc6f62ffc571620ec67c79757dcd4e6feca7 | 798e240eb8f4a5718013ab74ec9a0f9f9054399a | refs/heads/master | 2021-02-10T04:04:19.202332 | 2020-03-02T11:11:04 | 2020-03-02T11:11:04 | 244,350,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,013 | py | # coding: utf-8
"""
Sage Business Cloud Accounting - Accounts
Documentation of the Sage Business Cloud Accounting API. # noqa: E501
The version of the OpenAPI document: 3.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import (
ApiTypeError,
ApiValueError
)
class TaxTypesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_tax_types(self, **kwargs): # noqa: E501
"""Returns all Tax Types # noqa: E501
Returns all Tax Types ### Endpoint Availability * Accounting: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 * Accounting Start: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tax_types(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool show_legacy_id: Display the legacy_id for the Tax Types.
:param int items_per_page: Returns the given number of Tax Types per request.
:param int page: Go to specific page of Tax Types
:param str attributes: Specify the attributes that you want to expose for the Tax Types (expose all attributes with 'all'). These are in addition to the base attributes (name, path)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[TaxType]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tax_types_with_http_info(**kwargs) # noqa: E501
def get_tax_types_with_http_info(self, **kwargs): # noqa: E501
"""Returns all Tax Types # noqa: E501
Returns all Tax Types ### Endpoint Availability * Accounting: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 * Accounting Start: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tax_types_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool show_legacy_id: Display the legacy_id for the Tax Types.
:param int items_per_page: Returns the given number of Tax Types per request.
:param int page: Go to specific page of Tax Types
:param str attributes: Specify the attributes that you want to expose for the Tax Types (expose all attributes with 'all'). These are in addition to the base attributes (name, path)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[TaxType], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['show_legacy_id', 'items_per_page', 'page', 'attributes'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tax_types" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and 'items_per_page' in local_var_params and local_var_params['items_per_page'] > 200: # noqa: E501
raise ApiValueError("Invalid value for parameter `items_per_page` when calling `get_tax_types`, must be a value less than or equal to `200`") # noqa: E501
if self.api_client.client_side_validation and 'items_per_page' in local_var_params and local_var_params['items_per_page'] < 1: # noqa: E501
raise ApiValueError("Invalid value for parameter `items_per_page` when calling `get_tax_types`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'show_legacy_id' in local_var_params and local_var_params['show_legacy_id'] is not None: # noqa: E501
query_params.append(('show_legacy_id', local_var_params['show_legacy_id'])) # noqa: E501
if 'items_per_page' in local_var_params and local_var_params['items_per_page'] is not None: # noqa: E501
query_params.append(('items_per_page', local_var_params['items_per_page'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
if 'attributes' in local_var_params and local_var_params['attributes'] is not None: # noqa: E501
query_params.append(('attributes', local_var_params['attributes'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tax_types', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[TaxType]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_tax_types_key(self, key, **kwargs): # noqa: E501
"""Returns a Tax Type # noqa: E501
Returns a Tax Type ### Endpoint Availability * Accounting: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 * Accounting Start: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tax_types_key(key, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str key: The Tax Type Key. (required)
:param bool show_legacy_id: Display the legacy_id for the Tax Type.
:param str attributes: Specify the attributes that you want to expose for the Tax Type (expose all attributes with 'all'). These are in addition to the base attributes (name, path)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: TaxType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_tax_types_key_with_http_info(key, **kwargs) # noqa: E501
def get_tax_types_key_with_http_info(self, key, **kwargs): # noqa: E501
"""Returns a Tax Type # noqa: E501
Returns a Tax Type ### Endpoint Availability * Accounting: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 * Accounting Start: 🇨🇦, 🇩🇪, 🇪🇸, 🇫🇷, 🇬🇧, 🇮🇪, 🇺🇸 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tax_types_key_with_http_info(key, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str key: The Tax Type Key. (required)
:param bool show_legacy_id: Display the legacy_id for the Tax Type.
:param str attributes: Specify the attributes that you want to expose for the Tax Type (expose all attributes with 'all'). These are in addition to the base attributes (name, path)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(TaxType, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['key', 'show_legacy_id', 'attributes'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_tax_types_key" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'key' is set
if self.api_client.client_side_validation and ('key' not in local_var_params or # noqa: E501
local_var_params['key'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `key` when calling `get_tax_types_key`") # noqa: E501
collection_formats = {}
path_params = {}
if 'key' in local_var_params:
path_params['key'] = local_var_params['key'] # noqa: E501
query_params = []
if 'show_legacy_id' in local_var_params and local_var_params['show_legacy_id'] is not None: # noqa: E501
query_params.append(('show_legacy_id', local_var_params['show_legacy_id'])) # noqa: E501
if 'attributes' in local_var_params and local_var_params['attributes'] is not None: # noqa: E501
query_params.append(('attributes', local_var_params['attributes'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tax_types/{key}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaxType', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
adbebf98c026159ef9c04143343d4151516385e5 | 60a6ba6e5f3faca2b1e17c1e90917efc3cfc561a | /aoc2015/day1/day1_part1.py | d9bbe04649059dc1e7251f5840c3bd4ee320b3d9 | [
"MIT"
] | permissive | GetPastTheMonkey/advent-of-code | f462f5e2b72d913e39484446ce92a043d455091c | 7a5ee30dbafaf8ef6f9bf9936e484efd024aa308 | refs/heads/master | 2023-01-14T09:45:00.553575 | 2022-12-25T10:59:19 | 2022-12-25T13:00:44 | 160,684,715 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from os.path import join, dirname, realpath
with open(join(dirname(realpath(__file__)), "input.txt")) as f:
c = 0
for char in f.read():
if char == '(':
c += 1
elif char == ')':
c -= 1
else:
raise ValueError("Invalid character")
print(c)
| [
"[email protected]"
] | |
883c46163f5400da29155668fbcf6818585325c3 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/iotbx/examples/recalculate_phenix_refine_r_factors.py | 55f7cb79f2b307b7a1d57a9bb37a58306055e6c0 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 2,864 | py |
"""
Simple script to read in an MTZ file produced by phenix.refine, extract the
F-obs-filtered, F-model, and R-free-flags arrays, and calculate R-factors both
for the entire dataset and for resolution shells. This serves as an example
both for processing MTZ files, and for cctbx.miller functionality.
"""
from __future__ import absolute_import, division, print_function
from iotbx.reflection_file_utils import get_r_free_flags_scores
from iotbx.file_reader import any_file
import sys
def compute_r_factors(fobs, fmodel, flags):
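  """Print overall and per-resolution-shell R-work/R-free (and correlation coefficients) for the given arrays."""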
fmodel, fobs = fmodel.common_sets(other=fobs)
fmodel, flags = fmodel.common_sets(other=flags)
fc_work = fmodel.select(~(flags.data()))
fo_work = fobs.select(~(flags.data()))
fc_test = fmodel.select(flags.data())
fo_test = fobs.select(flags.data())
r_work = fo_work.r1_factor(fc_work)
r_free = fo_test.r1_factor(fc_test)
print("r_work = %.4f" % r_work)
print("r_free = %.4f" % r_free)
print("")
flags.setup_binner(n_bins=20)
fo_work.use_binning_of(flags)
fc_work.use_binner_of(fo_work)
fo_test.use_binning_of(fo_work)
fc_test.use_binning_of(fo_work)
for i_bin in fo_work.binner().range_all():
sel_work = fo_work.binner().selection(i_bin)
sel_test = fo_test.binner().selection(i_bin)
fo_work_bin = fo_work.select(sel_work)
fc_work_bin = fc_work.select(sel_work)
fo_test_bin = fo_test.select(sel_test)
fc_test_bin = fc_test.select(sel_test)
if fc_test_bin.size() == 0 : continue
r_work_bin = fo_work_bin.r1_factor(other=fc_work_bin,
assume_index_matching=True)
r_free_bin = fo_test_bin.r1_factor(other=fc_test_bin,
assume_index_matching=True)
cc_work_bin = fo_work_bin.correlation(fc_work_bin).coefficient()
cc_free_bin = fo_test_bin.correlation(fc_test_bin).coefficient()
legend = flags.binner().bin_legend(i_bin, show_counts=False)
print("%s %8d %8d %.4f %.4f %.3f %.3f" % (legend, fo_work_bin.size(),
fo_test_bin.size(), r_work_bin, r_free_bin, cc_work_bin, cc_free_bin))
def run(args):
mtz_in = any_file(args[0])
ma = mtz_in.file_server.miller_arrays
flags = fmodel = fobs = None
# select the output arrays from phenix.refine. This could easily be modified
# to handle MTZ files from other programs.
for array in ma :
labels = array.info().label_string()
if labels.startswith("R-free-flags"):
flags = array
elif labels.startswith("F-model"):
fmodel = abs(array)
elif labels.startswith("F-obs-filtered"):
fobs = array
if (None in [flags, fobs, fmodel]):
raise RuntimeError("Not a valid phenix.refine output file")
scores = get_r_free_flags_scores([flags], None)
test_flag_value = scores.test_flag_values[0]
flags = flags.customized_copy(data=flags.data()==test_flag_value)
compute_r_factors(fobs, fmodel, flags)
if (__name__ == "__main__"):
run(sys.argv[1:])
| [
"[email protected]"
] | |
ef1e1983b2ac1c067072b73e5cee912bd983e800 | 5ad1428a02a57c95273c43bf50ef6886397f75ac | /src/experiment_proto_sentiment.py | 9b2eabd996bf5975f75889cd7e694edfb071983f | [
"MIT"
] | permissive | icoderzqliu/protoinfomax | bcf3ad9131afd28fe09958611b2ec7d2a87a9e65 | c534b6e7df31727e14d56e7b02db4d7ba70105a9 | refs/heads/main | 2023-08-30T11:56:07.394374 | 2021-11-05T01:07:26 | 2021-11-05T01:07:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,545 | py | import os
import sys
sys.path.append(os.getcwd())
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from workspace.workspace_cls import SENT_WORDID, SENT_LABELID, SENT_WORD_MASK, SENT_ORIGINAL_TXT
import numpy
import random
from torch.utils.data import Dataset, DataLoader
import _pickle as cPickle
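# Assumed experiment root: HOME_DIR is not defined in this file, so take it from the
# environment (falling back to the working directory); adjust to match the original setup.
HOME_DIR = os.environ.get('HOME_DIR', os.getcwd())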
def read_pickle(filepath, filename):
f = open(os.path.join(filepath, filename), 'rb')
read_file = cPickle.load(f)
f.close()
return read_file
def save_pickle(filepath, filename, data):
f = open(os.path.join(filepath, filename), 'wb')
cPickle.dump(data, f)
print(" file saved to: %s"%(os.path.join(filepath, filename)))
f.close()
class RunExperiment:
def __init__(self, model, params):
self.model = model
self.params = params
def run_training_epoch(self, params, train_dl, optimizer, epoch):
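        """Run one epoch over the episodic loader, returning the mean loss and the domains seen; checkpoints every 10 epochs."""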
RSL_PATH= HOME_DIR+'/results'
model = self.model
idx2word = params['idx2word']
total_loss = 0.
i = 0
domains = []
for b in train_dl:
x, x_len, y, y_oh, xq, xq_len, yq, yq_oh, x_ood, x_ood_len, y_ood, y_ood_oh, domain = b['X_sup'], b['X_sup_len'], b['Y_sup'], b['Y_sup_oh'], b['X_q'], b['Xq_len'], b['Y_q'], b['Y_q_oh'], b['X_neg'], b['X_neg_len'], b['Y_neg'], b['Y_neg_oh'], b['target_sets_files']
x = x.squeeze()
x_len = x_len.squeeze()
y = y.squeeze()
y_oh = y_oh.squeeze()
xq = xq.squeeze()
xq_len = xq_len.squeeze()
yq = yq.squeeze()
yq_oh = yq_oh.squeeze()
x_ood = x_ood.squeeze()
x_ood_len = x_ood_len.squeeze()
y_ood = y_ood.squeeze()
y_ood_oh = y_ood_oh.squeeze()
x_ = x
y_ = y
xq_ = xq
yq_ = yq
x_ood_ = x_ood
y_ood_ = y_ood
x = x.view(200 * self.params['min_ss_size'], self.params['max_length'])
x_len = x_len.view(200 * self.params['min_ss_size'])
y = y.view(200 * self.params['min_ss_size'])
y_oh = y_oh.view(200 * self.params['min_ss_size'], 2)
loss = model(x, x_len, y, y_oh, xq, xq_len, yq, yq_oh, x_ood, x_ood_len, y_ood)
loss.backward()
optimizer.step()
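            optimizer.zero_grad()  # assumption: gradients are meant to be cleared per batch, not accumulated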
domains.append(domain)
total_loss += loss.item()
i+=1
train_loss = total_loss/i
if epoch % 10 == 0:
print("train_loss", train_loss)
state = {'epoch': epoch, 'state_dict': model.state_dict(), 'optim_dict' : optimizer.state_dict()}
torch.save(state, open(os.path.join(RSL_PATH, 'proto_sentiment_k100_%s.pth'%epoch), 'wb'))
return train_loss, domains
def run_testing_epoch(self, params, dev_dl):
model = self.model
idx2word = params['idx2word']
with torch.no_grad():
preds = []
all_dataset = []
probs = []
gts = []
avg_conf_ood = []
for dat in dev_dl:
for b in dat:
x, x_len, y, y_oh, xq, xq_len, yq, dataset = b['X_sup'], b['X_sup_len'], b['Y_sup'], b['Y_sup_oh'], b['X_q'], b['X_q_len'], b['Y_q'], b['target_set_file']
x = x.squeeze(0)
x_len = x_len.squeeze(0)
y = y.squeeze(0)
y_oh = y_oh.squeeze(0)
xq = xq.squeeze(0)
# sorting examples based on classes
srt = torch.sort(y, axis=0)
id_srt = srt[1][:,0]
x = x[id_srt]
x_len = x_len[id_srt]
y = y[id_srt]
y_oh = y_oh[id_srt]
x_cpu = x.cpu().numpy()
y_ = y.cpu().numpy()
x_str = [[idx2word[i] for i in tknids if i in idx2word and idx2word[i] != '</s>'] for tknids in x_cpu[:,0,:]]
for eid, (s, ys) in enumerate(zip(x_str, y_[:,0])):
s_ = ' '.join(s)
s_ = s_.replace('</s>', '').strip()
xq_cpu = xq.cpu().numpy()
xq_str = [[idx2word[i] for i in tknids if i in idx2word and idx2word[i] != '</s>'] for tknids in xq_cpu]
xq_str = ' '.join(xq_str[0])
pred = model._predict(x, x_len, y, xq, xq_len)
pred = pred.cpu().data.numpy()
pred_cls = numpy.argmax(pred)
conf = numpy.max(pred)
pred_cls_ = ''
yq_str = ''
if pred_cls == 0:
pred_cls_ = '1'
else:
pred_cls_ = '2'
if yq.cpu().data.numpy().tolist()[0][0] == 0:
yq_str = '1'
probs.append(pred)
gts.append(yq.cpu().data.numpy().tolist()[0][0])
elif yq.cpu().data.numpy().tolist()[0][0] == 1:
yq_str = '2'
probs.append(pred)
gts.append(yq.cpu().data.numpy().tolist()[0][0])
else:
yq_str = 'UNCONFIDENT_INTENT_FROM_SLAD'
avg_conf_ood.append(conf)
probs.append(pred)
gts.append(yq_str)
atuple = (pred_cls_, yq_str, conf)
preds.append(atuple)
all_dataset.append(dataset)
probs = numpy.array(probs)
gts = numpy.array(gts)
avg_conf_ood = numpy.mean(avg_conf_ood)
return preds, all_dataset, probs, gts, avg_conf_ood
def project_data(self, idx_, dev_dl, idx2word, epoch, str_):
RSL_PATH= HOME_DIR+'/encodeds/proto'
model = self.model
with torch.no_grad():
for i, dat in enumerate(dev_dl):
for j, sdat in enumerate(dat):
x, x_len, y, y_oh, xq, xq_len, yq, dataset = sdat['X_sup'], sdat['X_sup_len'], sdat['Y_sup'], sdat['Y_sup_oh'], sdat['X_q'], sdat['X_q_len'], sdat['Y_q'], sdat['target_set_file']
x = x.squeeze(0)
x_len = x_len.squeeze(0)
y = y.squeeze(0)
y_oh = y_oh.squeeze(0)
xq = xq.squeeze(0)
xq_len = xq_len.squeeze(0)
# sorting examples based on classes
srt = torch.sort(y, axis=0)
id_srt = srt[1][:,0]
x = x[id_srt]
x_len = x_len[id_srt]
y = y[id_srt]
y_oh = y_oh[id_srt]
sims, enc_prototype, x_sup_enc, y_sup, x_q_enc, y_q , x_sup_raw, xq_raw, y_raw_sup, y_raw, x_raw_sup_len, x_raw_q_len = model._encode(x, x_len, y, xq, xq_len, yq)
encodeds = sims, enc_prototype, x_sup_enc, y_sup, x_q_enc, y_q, x_sup_raw, xq_raw, y_raw_sup, y_raw, x_raw_sup_len, x_raw_q_len
save_pickle(RSL_PATH, 'encoded_sent_proto_k100_%s_%s_%s_%s_%s.pkl'%(str_, epoch, idx_, i, j), (dataset, encodeds))
return 0
def project_data_continue(self, i, dat, domain, idx2word, epoch, str_):
RSL_PATH= HOME_DIR+'/encodeds/proto'
model = self.model
with torch.no_grad():
x, xq, y, yq, x_len, xq_len = dat
x = x.squeeze(0)
x_len = x_len.squeeze(0)
y = y.squeeze(0)
srt = torch.sort(y, axis=0)
id_srt = srt[1][:,0]
x = x[id_srt]
x_len = x_len[id_srt]
y = y[id_srt]
sims, enc_prototype, x_sup_enc, y_sup, x_q_enc, y_q , x_sup_raw, xq_raw, y_raw_sup, y_raw, x_raw_sup_len, x_raw_q_len = model._encode(x, x_len, y, xq, xq_len, yq)
encodeds = sims, enc_prototype, x_sup_enc, y_sup, x_q_enc, y_q, x_sup_raw, xq_raw, y_raw_sup, y_raw
save_pickle(RSL_PATH, 'encoded_sent_proto_k100_%s_%s_0_%s.pkl'%(str_, epoch, i), (domain, encodeds))
return 0
def get_support_set_one_hot(self, support_set, classe_list):
cls_id_map = dict()
for lid in classe_list:
cls_id_map[lid] = len(cls_id_map)
support_set_one_hot = numpy.zeros([len(support_set),
len(support_set[0]),
len(cls_id_map)])
for k in range(len(support_set)):
for j in range(len(support_set[k])):
support_set_one_hot[k][j][cls_id_map[support_set[k][j]]] = 1.0
return support_set_one_hot
def get_one_hot(self, y_target, classe_list):
cls_id_map = dict()
for lid in classe_list:
cls_id_map[lid] = len(cls_id_map)
y_target_one_hot = numpy.zeros([len(y_target), len(cls_id_map)])
for k in range(len(y_target)):
y_target_one_hot[k][cls_id_map[y_target[k]]] = 1.0
return y_target_one_hot
| [
"[email protected]"
] | |
eb68af81c19aa405345e4b717d86a0032583c9e8 | eec267b544295bccb2ab88b13b221ff4fd3d2985 | /test_new_edi.py | d82f2d0cbabccbb7fac6c5dbe44dfe81e85531d0 | [] | no_license | ralfcam/sandbox_scripts | dda368dcf8b8d01147660dedc6d0fcae2d15f80c | 6fa53a63152c4a00396b38fb92ae7dc6f72d6b90 | refs/heads/master | 2022-05-29T02:02:24.849913 | 2020-05-01T02:23:57 | 2020-05-01T02:23:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 25 10:48:11 2016
@author: jpeacock
"""
import mtpy.core.z as mtz
import numpy as np
f1 = r"c:\Users\jpeacock\Documents\ShanesBugs\Sev_MT_Final_ga\MT001.edi"
with open(f1, 'r') as fid:
    data_lines = fid.readlines()[102:]  # skip the EDI header block; the data section of this particular file starts afterwards
data_dict = {}
data_find = False
for line in data_lines:
if line.find('>') >= 0 and line.find('!') == -1:
line_list = line[1:].strip().split()
key = line_list[0].lower()
if key[0] == 'z' or key[0] == 't' or key == 'freq':
data_find = True
data_dict[key] = []
else:
data_find = False
elif data_find == True and line.find('>') == -1 and line.find('!') == -1:
d_lines = line.strip().split()
for ii, dd in enumerate(d_lines):
# check for empty values and set them to 0, check for any
# other characters sometimes there are ****** for a null
# component
try:
d_lines[ii] = float(dd)
if d_lines[ii] == 1.0e32:
d_lines[ii] = 0.0
except ValueError:
d_lines[ii] = 0.0
data_dict[key] += d_lines
## fill useful arrays
freq_arr = np.array(data_dict['freq'], dtype=np.float)
## fill impedance tensor
z_obj = mtz.Z()
z_obj.freq = freq_arr.copy()
z_obj.z = np.zeros((freq_arr.size, 2, 2), dtype=np.complex)
z_obj.z_err = np.zeros((freq_arr.size, 2, 2), dtype=np.float)
try:
z_obj.rotation_angle = data_dict['zrot']
except KeyError:
z_obj.rotation_angle = np.zeros_like(freq_arr)
z_obj.z[:, 0, 0] = np.array(data_dict['zxxr'])+\
np.array(data_dict['zxxi'])*1j
z_obj.z[:, 0, 1] = np.array(data_dict['zxyr'])+\
np.array(data_dict['zxyi'])*1j
z_obj.z[:, 1, 0] = np.array(data_dict['zyxr'])+\
np.array(data_dict['zyxi'])*1j
z_obj.z[:, 1, 1] = np.array(data_dict['zyyr'])+\
np.array(data_dict['zyyi'])*1j
z_obj.z_err[:, 0, 0] = np.array(data_dict['zxx.var'])
z_obj.z_err[:, 0, 1] = np.array(data_dict['zxy.var'])
z_obj.z_err[:, 1, 0] = np.array(data_dict['zyx.var'])
z_obj.z_err[:, 1, 1] = np.array(data_dict['zyy.var'])
| [
"[email protected]"
] | |
41dcc0f46adda32291cfcb69a957b1b3ffce535f | 8e311f8f94c9d218bd37f81c0badc906d78d6b33 | /env/Lib/site-packages/reversion/__init__.py | e3b63c498750aeee9563ccdc26d3ceff9e27f228 | [
"MIT"
] | permissive | htwenhe/DJOA | d76307ff8752c1e2a89101de1f74094b94bf9b18 | 3c2d384a983e42dedfd72561353ecf9370a02115 | refs/heads/master | 2021-09-03T21:49:28.267986 | 2018-01-12T08:12:55 | 2018-01-12T08:12:55 | 108,937,324 | 0 | 1 | MIT | 2018-01-12T08:06:50 | 2017-10-31T02:59:26 | Python | UTF-8 | Python | false | false | 923 | py | """
An extension to the Django web framework that provides version control for model instances.
Developed by Dave Hall.
<http://www.etianen.com/>
"""
try:
import django # noqa
except ImportError: # pragma: no cover
# The top-level API requires Django, which might not be present if setup.py
# is importing reversion to get __version__.
pass
else:
from reversion.errors import ( # noqa
RevertError,
RevisionManagementError,
RegistrationError,
)
from reversion.revisions import ( # noqa
is_active,
is_manage_manually,
get_user,
set_user,
get_comment,
set_comment,
get_date_created,
set_date_created,
add_meta,
add_to_revision,
create_revision,
register,
is_registered,
unregister,
get_registered_models,
)
__version__ = VERSION = (2, 0, 10)
| [
"[email protected]"
] | |
bd94c819b0a4ffdedd7fe7221a210a1d599e191d | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/course-schedule-ii.py | 584064f35bc6f961716bc46e8c017dda5ee86015 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 1,129 | py | from collections import defaultdict, deque
class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
res, zero_in_degree_queue = [], deque()
in_degree, out_degree = defaultdict(set), defaultdict(set)
for i, j in prerequisites:
in_degree[i].add(j)
out_degree[j].add(i)
for i in xrange(numCourses):
if i not in in_degree:
zero_in_degree_queue.append(i)
while zero_in_degree_queue:
prerequisite = zero_in_degree_queue.popleft()
res.append(prerequisite)
if prerequisite in out_degree:
for course in out_degree[prerequisite]:
in_degree[course].discard(prerequisite)
if not in_degree[course]:
zero_in_degree_queue.append(course)
del out_degree[prerequisite]
if out_degree:
return []
return res
| [
"[email protected]"
] | |
f9d2a50793292a53ca569289d3130dccc8a80386 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /gift/experiment/cifar10_classification/multicifar10_wide_resnet_gradualmixup_1000_config.py | ebe2db0f88122e4e8cd4430b15c0185d117daf04 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 4,803 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for Cifar10 classification with end2end training."""
import ml_collections
def get_config():
"""Returns the experiment configuration for WideResnet on Cifar10."""
config = ml_collections.ConfigDict()
config.experiment_name = 'wide_resnet_cifar10_cls_gradual_mixup'
# Train mode
config.train_mode = 'self_adaptive_gradual_mixup'
config.pretrained = ml_collections.ConfigDict()
config.pretrained.only_backbone_pretrained = False
config.pretrained.checkpoint_path = ''
# Task
config.task_name = 'multi_env_identity_dm_cls'
config.dataset_name = 'multi_cifar10'
config.data_augmentations = ['random_flip']
config.train_environments = ['cifar', 'translated']
config.eval_environments = ['cifar', 'translated']
config.labeled_environments = ['cifar']
config.unlabeled_environments = ['translated']
# Model and data dtype
config.model_dtype_str = 'float32'
config.data_dtype_str = 'float32'
config.model_name = 'wide_resnet'
config.blocks_per_group = 4
config.channel_multiplier = 10
config.num_outputs = 10
config.dropout_rate = 0.0
config.output_dim = 10
# Training
config.optimizer = 'adam'
config.opt_hparams = {'weight_decay': 0.001}
config.l2_decay_factor = .0
config.max_grad_norm = 5.0
config.label_smoothing = None
config.num_training_steps = 1000
config.num_training_epochs = None
config.eval_frequency = 100
config.batch_size = 512
config.eval_batch_size = 512
config.rng_seed = 0
# Learning rate
config.steps_per_epoch = 50000 // config.batch_size
config.total_steps = config.num_training_steps
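  # Linear learning-rate scaling: the base rate is defined relative to a 256-example batch.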
config.base_lr = 0.000002 * (config.batch_size / 256)
config.lr_hparams = {
'learning_rate_schedule': 'compound',
'factors': 'constant * decay_every',
'initial_learning_rate': config.base_lr,
'steps_per_decay': 100,
'decay_factor': 0.99,
}
# Pipeline params
config.confidence_quantile_threshold = 0.3
config.self_supervised_label_transformation = 'sharp'
config.label_temp = 0.5
config.self_training_iterations = 5
config.reinitialize_optimizer_at_each_step = False
config.restart_learning_rate = False
config.pseudo_labels_train_mode = False
config.stop_gradient_for_interpolations = True
config.ground_truth_factor_params = {'mode': 'constant', 'initial_value': 0.0}
config.inter_env_interpolation = False
config.intra_env_interpolation = False
config.unlabeled_interpolation = True
config.mixup_layer_set = [0, 1, 2, 3]
config.interpolation_method = 'plain_convex_combination'
config.intra_interpolation_method = 'plain_convex_combination'
config.interpolation_mode = 'hard'
config.ot_label_cost = 0.1
config.ot_l2_cost = 0.0000000
config.ot_noise_cost = 0.0
config.intra_mixup_factor_params = {'mode': 'constant', 'initial_value': 0.0}
config.beta_schedule_params = {'mode': 'constant', 'initial_value': 1.0}
config.alpha_schedule_param = {'mode': 'constant', 'initial_value': 1.0}
config.inter_mixup_factor_params = {'mode': 'constant', 'initial_value': 0.0}
config.inter_beta_schedule_params = {'mode': 'constant', 'initial_value': 1.0}
config.inter_alpha_schedule_param = {'mode': 'constant', 'initial_value': 1.0}
config.unlabeled_mixup_factor_params = {
'mode': 'constant',
'initial_value': 1.0
}
config.unlabeled_beta_params = {
'mode': 'linear_decay',
'initial_value': 10,
'min_value': 0,
'total_steps': config.total_steps,
'num_steps': config.self_training_iterations
}
config.unlabeled_alpha_params = {
'mode': 'linear_grow',
'initial_value': 1,
'max_value': 10,
'total_steps': config.total_steps,
'num_steps': config.self_training_iterations
}
# IRM related
config.penalty_weight = 0.0
config.penalty_anneal_iters = 0
# Continual learning related:
config.gift_factor = 0.001
# Domain Mapper related:
config.aux_weight = 0
config.aux_l2 = 0
# logging
config.write_summary = True # write TB and XM summary
config.checkpoint = True # do checkpointing
config.keep_ckpts = 3
config.keep_env_ckpts = False
config.write_xm_measurements = True
config.trial = 0
return config
| [
"[email protected]"
] | |
bc3feca74db58c5140fd26ee1ae452b49921fbcf | 178109bccf5014a97d74054d8f40511a241625fe | /signature/signature.py | 258257f75fa208b6755eb6441b82953b60aaf0a3 | [] | no_license | rajeshwarg/signature-disambiguation | 1b6d46a144feb71ca7efe7e7f4105cc3d93b548a | 2b10c4f1ec4ca7785d812ea8de679ab681d0d98f | refs/heads/master | 2021-01-19T11:54:39.793623 | 2014-04-28T21:28:43 | 2014-04-28T21:28:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,050 | py | import matplotlib.pyplot as plt
import numpy as np
from skimage.data import imread
from skimage import io
from skimage.color import rgb2gray
from skimage.measure import find_contours
from skimage.transform import probabilistic_hough_line
from itertools import cycle
from pylab import *
from PIL import Image
import os
import shutil
import sys
import pandas as pd
from skimage.morphology import square, erosion
from skimage.filter import hsobel
from scipy.spatial.distance import euclidean
from scipy.signal import convolve2d, convolve
## CONSTANTS ##
CONTOUR_MINLENGTH = 200
def compute_contours(img, length=300, value=0.1):
"""
Given an Image object, finds the contours. Filters
the contours by how long they are (this is the optional length
argument)
Returns:
ret_contours (list of contours),
ret_lengths (list of lengths of each contour in ret_contours)
"""
    length = CONTOUR_MINLENGTH  # note: overrides the caller-supplied length argument
contours = find_contours(img, value)
contour_lengths = [len(x[:, 1]) for x in contours]
ret_contours = []
ret_lengths = []
for contour in contours:
if (contour.shape[0] >= length):
ret_contours.append(contour)
ret_lengths.append(contour.shape[0])
return ret_contours, ret_lengths
def get_boundingboxes(contours, plot=False):
"""
Given a list of contours, computes the bounding box
for each and returns the list
"""
boxes = []
for contour in contours:
# compute bounding box coordinates
minx = miny = float('inf')
maxx = maxy = float('-inf')
minx = min(minx, min(contour, key=lambda x: x[1])[1])
miny = min(miny, min(contour, key=lambda x: x[0])[0])
maxx = max(maxx, max(contour, key=lambda x: x[1])[1])
maxy = max(maxy, max(contour, key=lambda x: x[0])[0])
if plot:
x = (minx, maxx, maxx, minx, minx)
y = (miny, miny, maxy, maxy, miny)
plt.plot(x,y,'-b',linewidth=2)
boxes.append( map(int,(minx,miny,maxx,maxy)) )
return boxes
def boundingbox(contour):
"""
Given a list of contours, computes the bounding box
for each and returns the list
"""
# compute bounding box coordinates
minx = miny = float('inf')
maxx = maxy = float('-inf')
minx = int(min(minx, min(contour, key=lambda x: x[1])[1]))
miny = int(min(miny, min(contour, key=lambda x: x[0])[0]))
maxx = int(max(maxx, max(contour, key=lambda x: x[1])[1]))
maxy = int(max(maxy, max(contour, key=lambda x: x[0])[0]))
return (minx,miny,maxx,maxy)
def boundingboxcorners(box):
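    """Return the four (x, y) corner points of a (minx, miny, maxx, maxy) box."""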
minx,miny,maxx,maxy = box
corners = []
for x in (minx,maxx):
for y in (miny,maxy):
corners.append((x,y))
return corners
def mergeboxes(box1,box2):
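    """Return the smallest bounding box covering both input boxes."""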
minx1,miny1,maxx1,maxy1 = box1
minx2,miny2,maxx2,maxy2 = box2
minx = min(minx1,minx2)
maxx = max(maxx1,maxx2)
miny = min(miny1,miny2)
maxy = max(maxy1,maxy2)
return (minx,miny,maxx,maxy)
def is_box_in_box(corners1, corners2):
"""
returns True if corners1 is in-part contained
inside corners2
"""
min_x = min(map(lambda x: x[0], corners2))
max_x = max(map(lambda x: x[0], corners2))
min_y = min(map(lambda x: x[1], corners2))
max_y = max(map(lambda x: x[1], corners2))
width = max_x - min_x
height = max_y - min_y
for p in corners1:
if p[0] >= min_x and p[1] >= min_y and \
p[0] < min_x+width and p[1] < min_y+height:
return True
return False
def do_merge(corners1, corners2):
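    """
    Return True if the two boxes should be merged: any pair of corners lies
    within 100 pixels, or one box (partially) contains the other.
    """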
for corner1 in corners1:
for corner2 in corners2:
if euclidean(corner1,corner2) < 100:
return True
if is_box_in_box(corners1, corners2) or is_box_in_box(corners2, corners1):
return True
return False
def link_contours(contours):
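    """
    Greedily merge nearby or overlapping bounding boxes of the given contours
    (with a capped number of passes) and return the merged box list.
    """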
# check overlaps
# remove flat lines
merged = True
boxes = map(boundingbox, contours)
iterations_left = len(boxes)
old_boxes = None
for i in range(10*len(boxes)):
if iterations_left == 0:
print 'none',i,boxes
break
box1 = boxes.pop(0)
iterations_left -= 1
corners1 = boundingboxcorners(box1)
for index,box2 in enumerate(boxes):
corners2 = boundingboxcorners(box2)
if do_merge(corners1, corners2):
boxes.pop(index)
boxes.append(mergeboxes(box1,box2))
iterations_left += 1
merged=True
break
else:
if box1 not in boxes:
boxes.append(box1)
merged = False
return boxes
def process(filename):
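    """
    Binarize the image, paint out long horizontal lines, erode, then extract
    long contours and return the merged bounding boxes large enough to
    plausibly contain a signature, together with the contours themselves.
    """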
imagepath = os.path.join(os.getcwd(), filename)
orig_img = io.imread(filename,True,'pil')
img = orig_img > 0.9 # binary threshold
lines = probabilistic_hough_line(hsobel(img),line_length=200)
for l in lines:
x0, x1 = l[0][0],l[1][0]
y = l[0][1]
for x in range(x0,x1):
img[y+1,x] = 1
img[y,x] = 1
img[y-1,x] = 1
erode_img = erosion(img, square(2))
contours, lengths = compute_contours(erode_img,0.8)
lengths = pd.Series(lengths)
lengths = lengths[lengths > 400]
for i in lengths.index:
contour = contours[i]
box = get_boundingboxes([contour])[0]
x_sum = sum(map(abs, np.gradient(contour[:,1])))
y_sum = sum(map(abs, np.gradient(contour[:,0])))
area = (box[2] - box[0]) * (box[3] - box[1])
plt.plot(contour[:,1],contour[:,0])
contours = [contours[i] for i in lengths.index]
newboxes = set(link_contours(contours))
retboxes = []
for box in newboxes:
minx,miny,maxx,maxy = box
x = (minx, maxx, maxx, minx, minx)
y = (miny, miny, maxy, maxy, miny)
area = (maxx-minx) * (maxy-miny)
if area > 10000:
retboxes.append(box)
plt.plot(x, y, '-b', linewidth=2)
imshow(erode_img)
return retboxes, contours
def output(contours,shape=(126,126),outputfile='signatures.csv'):
"""
Take the set of all contours that we have identified as possible signatures
and resize them all into a canonical shape (the best shape and the best
method for doing so have yet to be determined) so we can train a classifier
on the pixels. We want to do unsupervised clustering to separate the
signatures from non-signatures
"""
from scipy import resize
with open(outputfile,'a') as f:
for c in contours:
newc = map(int, resize(c, shape).flatten())
f.write('\t'.join(map(str, newc))+'\n')
if __name__=='__main__':
plt.gray()
f = plt.figure(figsize=(16,12))
filename = sys.argv[1]
basename = ''.join(filename.split('/')[-1].split('.')[:-1])
boxes, contours = process(filename)
output(contours)
plt.savefig(basename+'-signature.png')
if len(sys.argv) > 2:
shutil.move(basename+'-signature.png',sys.argv[2])
| [
"[email protected]"
] | |
0c3b7cd7d2c76d0d5aaee1d1c5f1f0d14ecccbb4 | f47bfd6d1f6e2040c070086a6c0b7f279dfebb6a | /brick/check_point_subclass_dict.py | d198eff7693fb0a1dd818062f964ad95e1f006b8 | [
"BSD-3-Clause"
] | permissive | metehangelgi/scrabble | b34844cbd17a5588f69af22eb04dfe8d89d14bf3 | 6d64be2e9c7d0392332592c804eb15c20a3e2516 | refs/heads/master | 2022-03-26T12:17:06.133378 | 2018-11-25T03:42:56 | 2018-11-25T03:42:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | import json
with open('point_subclass_dict.json', 'r') as fp:
d = json.load(fp)
found_tagsets = set()
redundant_tagsets = set()
for superclass, tagsets in d.items():
redundant_tagsets.union(set([tagset for tagset in tagsets \
if tagset in found_tagsets]))
found_tagsets.union(set(tagsets))
print(redundant_tagsets)
| [
"[email protected]"
] | |
4bbc331673aca10bf2d8e93f9f0ce957c28ecf56 | 92bf406d734e5ffe32a369e9e3efa327bf2b926f | /paperboy/config/application.py | e0b857d9781e33a48c281deb2a0fd7b7f7352cc0 | [
"Apache-2.0"
] | permissive | samanalysis/paperboy | ee0e299f93f60f8d69804577e82d21833b9c6c43 | 2e20cffdea8567e560223996b855e64faf3f3c02 | refs/heads/master | 2020-06-20T09:20:27.721493 | 2019-06-15T15:24:18 | 2019-06-15T15:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,346 | py | import falcon
import logging
import os
from six.moves.urllib_parse import urljoin
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from traitlets.config.application import Application
from traitlets import Int, Instance, List, Tuple, Unicode, Bool, validate, TraitError
from uuid import uuid4
# falcon api
from ..server.api import FalconAPI
# deployer
from ..server.deploy import FalconDeploy
# base configs
from .user import UserConfig
from .notebook import NotebookConfig
from .job import JobConfig
from .report import ReportConfig
from .scheduler import AirflowSchedulerConfig
from .storage import SQLAStorageConfig
from .output import LocalOutputConfig
# no auth
from ..middleware import NoUserMiddleware, NoAuthRequiredMiddleware
# essential middleware
from ..middleware import CORSMiddleware, MultipartMiddleware
# sql
from ..storage.sqla import Base
from ..storage.sqla import UserSQLStorage, NotebookSQLStorage, JobSQLStorage, ReportSQLStorage
from ..middleware import SQLAlchemySessionMiddleware, SQLUserMiddleware, SQLAuthRequiredMiddleware
class Paperboy(Application):
"""Base class for paperboy applications"""
name = 'paperboy'
description = 'paperboy'
############
# Gunicorn #
############
workers = Int(default_value=1, help="Number of gunicorn workers").tag(config=True)
port = Unicode(default_value='8080', help="Port to run on").tag(config=True)
############
##########
# Falcon #
##########
api = Instance(falcon.API, help="A Falcon API instance").tag(config=True)
##########
########
# URLs #
########
baseurl = Unicode(default_value='/', help="Base URL (for reverse proxies)").tag(config=True)
apiurl = Unicode(default_value='/api/v1/', help="API base URL (for reverse proxies)").tag(config=True)
loginurl = Unicode(default_value='login', help="login url").tag(config=True)
logouturl = Unicode(default_value='logout', help="logout url").tag(config=True)
registerurl = Unicode(default_value='register', help="register url").tag(config=True)
########
########
# Auth #
########
http = Bool(default_value=True, help="Running on HTTP (as opposed to https, so token is insecure)").tag(config=True)
include_password = Bool(default_value=False).tag(config=True)
include_register = Bool(default_value=True).tag(config=True)
token_timeout = Int(default_value=600).tag(config=True)
#############
##########
# Config #
##########
# FIXME doesnt allow default_value yet
user_config = UserConfig
notebook_config = NotebookConfig
job_config = JobConfig
report_config = ReportConfig
##########
##############
# Middleware #
##############
essential_middleware = [CORSMiddleware(allow_all_origins=True).middleware,
MultipartMiddleware()]
extra_middleware = List(default_value=[]) # List of extra middlewares to install
auth_required_middleware = Instance(object)
load_user_middleware = Instance(object)
##############
##################
# Custom handler #
##################
extra_handlers = List(trait=Tuple(), default_value=[]) # List of tuples (route, handler) of handlers to install
##################
##########################################
# Predefined Configurations #
#
##########################################
    backend = Unicode(default_value='dummy', help="Backend set to use, options are {custom, dummy, git, sqla}").tag(config=True)
auth = Unicode(default_value='dummy', help="Authentication backend set to use, options are {none, sqla, custom}").tag(config=True)
secret = Unicode()
@validate('backend')
def _validate_backend(self, proposed):
if proposed['value'] not in ('custom', 'dummy', 'git', 'sqla',):
            raise TraitError('backend not recognized: {}'.format(proposed['value']))
return proposed['value']
@validate('auth')
def _validate_auth(self, proposed):
if proposed['value'] not in ('custom', 'none', 'sqla',):
            raise TraitError('auth backend not recognized: {}'.format(proposed['value']))
return proposed['value']
##########################################
###########
# Storage #
###########
# FIXME doesnt allow default_value yet
storage = SQLAStorageConfig()
sql_dev = Bool(default_value=False)
###########
#############
# Scheduler #
#############
# FIXME doesnt allow default_value yet
scheduler = AirflowSchedulerConfig()
#############
##################
# Output #
##################
output = LocalOutputConfig()
##################
def start(self):
"""Start the whole thing"""
self.port = os.environ.get('PORT', self.port)
options = {
'bind': '0.0.0.0:{}'.format(self.port),
'workers': self.workers
}
self.secret = str(uuid4())
if self.sql_dev:
            # Point the configured storage at an in-memory SQLite database for dev mode
            self.storage.sql_url = 'sqlite:///:memory:'
logging.critical('Using SQL in memory backend')
self.storage.engine = create_engine(self.storage.sql_url, echo=False)
Base.metadata.create_all(self.storage.engine)
            self.storage.sessionmaker = sessionmaker(bind=self.storage.engine)
self.backend = 'sqla'
self.auth = 'sqla'
self.extra_middleware = self.extra_middleware + [SQLAlchemySessionMiddleware(self.storage.sessionmaker)]
self.storage.notebook_storage = NotebookSQLStorage
self.storage.job_storage = JobSQLStorage
self.storage.report_storage = ReportSQLStorage
self.storage.user_storage = UserSQLStorage
self.storage.sql_user = True
logging.critical('Using SQL auth')
self.auth_required_middleware = SQLAuthRequiredMiddleware
self.load_user_middleware = SQLUserMiddleware
else:
# Preconfigured storage backends
if self.backend == 'git':
logging.critical('Using Git backend')
raise NotImplementedError()
# default to sqla
# elif self.backend == 'sqla':
else:
logging.critical('Using SQL backend')
self.storage.engine = create_engine(os.environ.get('PAPERBOY_SQL_URL') or self.storage.sql_url, echo=False)
Base.metadata.create_all(self.storage.engine)
self.storage.sessionmaker = sessionmaker(bind=self.storage.engine)
self.extra_middleware = self.extra_middleware + [SQLAlchemySessionMiddleware(self.storage.sessionmaker)]
self.storage.notebook_storage = NotebookSQLStorage
self.storage.job_storage = JobSQLStorage
self.storage.report_storage = ReportSQLStorage
self.storage.user_storage = UserSQLStorage
self.storage.sql_user = True
self.auth = 'sqla'
# Preconfigured auth backends
if self.auth == 'none':
logging.critical('Using No auth')
self.auth_required_middleware = NoAuthRequiredMiddleware
self.load_user_middleware = NoUserMiddleware
elif self.auth == 'sqla':
logging.critical('Using SQL auth')
self.auth_required_middleware = SQLAuthRequiredMiddleware
self.load_user_middleware = SQLUserMiddleware
FalconDeploy(FalconAPI(self), options).run()
@classmethod
def launch_instance(cls, argv=None, **kwargs):
"""Launch an instance of a Paperboy Application"""
return super(Paperboy, cls).launch_instance(argv=argv, **kwargs)
def to_dict(self):
return {'name': self.name,
'description': self.description,
'workers': self.workers,
'port': self.port}
aliases = {
'workers': 'Paperboy.workers',
'port': 'Paperboy.port',
'baseurl': 'Paperboy.baseurl',
'backend': 'Paperboy.backend',
'auth': 'Paperboy.auth',
'sql_url': 'Paperboy.storage.sql_url',
}
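# Illustrative command line using the aliases above (the entry-point name is an
# assumption and depends on how the package's console scripts are installed):
#   python -m paperboy --port=8080 --backend=sqla --auth=sqla \
#       --sql_url=sqlite:///paperboy.db --workers=2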
def _login_redirect(config, *args, **kwargs):
raise falcon.HTTPFound(urljoin(config.baseurl, config.loginurl))
| [
"[email protected]"
] | |
2eff6562ec8a043de5548599076490f426fda71e | eedea7d2f6ad0f497f1469ab78ea00c3c33bd57a | /hamon_shu/materials/score_structure/segment_07/pitch_material_pattern.py | c24b023291d9b5b7db9bb259abe6928e367405ee | [] | no_license | GregoryREvans/hamon_shu | 750927aec941f60bf0b90ee2196a886c19c611ad | 8081ee57fce8db07c3492e67e7a634e3b08f3bb3 | refs/heads/master | 2022-02-27T06:22:44.449635 | 2022-02-10T13:48:23 | 2022-02-10T13:48:23 | 144,753,533 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,713 | py | from hamon_shu.materials.pitch import pitches_VII
key_list_1 = [
"violin_1_pitch_handler_three",
"violin_1_pitch_handler_one",
"violin_1_pitch_handler_two",
"violin_1_pitch_handler_four",
"violin_1_pitch_handler_three",
"violin_1_pitch_handler_one",
"violin_1_pitch_handler_four",
"violin_1_pitch_handler_two",
]
key_list_2 = [
"violin_2_pitch_handler_three",
"violin_2_pitch_handler_one",
"violin_2_pitch_handler_two",
"violin_2_pitch_handler_four",
"violin_2_pitch_handler_three",
"violin_2_pitch_handler_one",
"violin_2_pitch_handler_four",
"violin_2_pitch_handler_two",
]
key_list_3 = [
"viola_pitch_handler_three",
"viola_pitch_handler_one",
"viola_pitch_handler_two",
"viola_pitch_handler_four",
"viola_pitch_handler_three",
"viola_pitch_handler_one",
"viola_pitch_handler_four",
"viola_pitch_handler_two",
]
key_list_4 = [
"cello_pitch_handler_three",
"cello_pitch_handler_one",
"cello_pitch_handler_two",
"cello_pitch_handler_four",
"cello_pitch_handler_three",
"cello_pitch_handler_one",
"cello_pitch_handler_four",
"cello_pitch_handler_two",
]
dict = {
"violin_1_pitch_handler_one": pitches_VII.violin_1_pitch_handler_one,
"violin_1_pitch_handler_two": pitches_VII.violin_1_pitch_handler_two,
"violin_1_pitch_handler_three": pitches_VII.violin_1_pitch_handler_three,
"violin_1_pitch_handler_four": pitches_VII.violin_1_pitch_handler_four,
"violin_2_pitch_handler_one": pitches_VII.violin_2_pitch_handler_one,
"violin_2_pitch_handler_two": pitches_VII.violin_2_pitch_handler_two,
"violin_2_pitch_handler_three": pitches_VII.violin_2_pitch_handler_three,
"violin_2_pitch_handler_four": pitches_VII.violin_2_pitch_handler_four,
"viola_pitch_handler_one": pitches_VII.viola_pitch_handler_one,
"viola_pitch_handler_two": pitches_VII.viola_pitch_handler_two,
"viola_pitch_handler_three": pitches_VII.viola_pitch_handler_three,
"viola_pitch_handler_four": pitches_VII.viola_pitch_handler_four,
"cello_pitch_handler_one": pitches_VII.cello_pitch_handler_one,
"cello_pitch_handler_two": pitches_VII.cello_pitch_handler_two,
"cello_pitch_handler_three": pitches_VII.cello_pitch_handler_three,
"cello_pitch_handler_four": pitches_VII.cello_pitch_handler_four,
}
material_list_1 = [dict[x] for x in key_list_1]
material_list_2 = [dict[x] for x in key_list_2]
material_list_3 = [dict[x] for x in key_list_3]
material_list_4 = [dict[x] for x in key_list_4]
materials = [material_list_1, material_list_2, material_list_3, material_list_4]
pitch_material_list = []
for x in materials:
pitch_material_list.extend(x)
| [
"[email protected]"
] | |
55844d8aed6b61cd815f0b6c616ce85c2dce5750 | 5900bc2615f456512b73455203fa90c4a016230f | /mimic.py | 77ba93da838bbfae6e99cae87a3ee91c38346f59 | [] | no_license | RamiroAlvaro/google-python-exercises | 615a2a4aa6c02d7a7b74eed42119dc8402eccd4c | 50b711ca3fbcd008f28e60c53b7d5136573a44ad | refs/heads/master | 2020-12-25T15:08:36.125816 | 2017-01-27T20:24:47 | 2017-01-27T20:24:47 | 66,118,312 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
import textwrap
def mimic_dict(filename):
"""Returns mimic dict mapping each word to list of words which follow it."""
with open(filename, 'rt') as f:
text = f.read().split()
    d = {}
    prev = ''  # the empty string is what comes before the first word
    for word in text:
        # Record each word under the word that immediately precedes it,
        # keeping duplicates, as described in the module docstring.
        d.setdefault(prev, []).append(word)
        prev = word
    return d
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
words = []
for item in range(200):
if word not in mimic_dict:
word = ''
word = random.choice(mimic_dict[word])
words.append(word)
s = ' '.join(words)
print(textwrap.fill(s, 70))
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print('usage: ./mimic.py file-to-read')
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ce53004e1ff1be30445cc2cf0fb824a75c34b070 | f2e8afed063bef9292a2ac8d3a84943ebcef9b11 | /09_INSTAGRAM/feeds/views.py | b5126502c96408a53875d8d8ebf6f954d57c4287 | [] | no_license | seunggue/django | e8a4f376bda247b8780d4f838365b7621cde6101 | 377b73652b723be1d7bd83abd9bc9e14203799be | refs/heads/master | 2022-12-15T13:59:31.218873 | 2019-10-28T08:42:33 | 2019-10-28T08:42:33 | 200,620,105 | 0 | 0 | null | 2022-12-08T06:14:59 | 2019-08-05T08:59:41 | Python | UTF-8 | Python | false | false | 573 | py | from django.shortcuts import render, redirect
from .models import Feed
# from IPython import embed
# Create your views here.
def index(request):
feeds = Feed.objects.all()
context = {
'feeds':feeds
}
return render(request, 'index.html', context)
def create(request):
if request.method == 'POST':
content = request.POST.get('content')
image = request.FILES.get('image')
feed = Feed.objects.create(content=content, image=image)
return redirect('feeds:index')
else:
return render(request, 'form.html') | [
"[email protected]"
] | |
20d57108762761a55044954f0a80ae5d2bc49f5d | 50f0d33b12778f911fe16a4e18d0659936b9086b | /0x04-python-more_data_structures/9-multiply_by_2.py | ba5f3c37cd1dca83e69eaeffc1980d63f74de8c8 | [] | no_license | monicajoa/holbertonschool-higher_level_programming | 4f4eaa7aa2cad1642e7aed54663cb30eb92e1b4f | 451d20174144ad96fa726a4389c7aae72abf2495 | refs/heads/master | 2022-12-18T00:35:00.682624 | 2020-09-25T05:14:57 | 2020-09-25T05:14:57 | 259,479,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | #!/usr/bin/python3
def multiply_by_2(a_dictionary):
new_dictionary = {n: a_dictionary[n] * 2 for n in a_dictionary}
return (new_dictionary)
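# Illustrative usage (not part of the original task file):
#   multiply_by_2({'john': 12, 'bob': 3})  ->  {'john': 24, 'bob': 6}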
| [
"[email protected]"
] | |
76e6b924ecfda63a2b4613e3f47eb9b30e9f9c31 | 595b7157cdf72060c88b8f5b0807b984fa3e63a4 | /python/scrap_wiki.py | 3e8c74c56d0aaaaf94851c796dbe90ce65c9027f | [] | no_license | HomingYuan/data_science_way | 248d15710004eedc1f0fe70ab67318cbdc6e42aa | dd3153f44d3b4cc90b589ae0dc1d4d4f0f671da4 | refs/heads/master | 2021-01-20T14:34:54.943011 | 2018-03-06T02:00:29 | 2018-03-06T02:00:29 | 90,630,422 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Homing
@software: PyCharm Community Edition
@file: scrap_wiki.py
@time: 2017/6/7 20:46
"""
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://en.wikipedia.org/wiki/Comparison_of_text_editors")
bsObj = BeautifulSoup(html)
#The main comparison table is currently the first table on the page
table = bsObj.findAll("table",{"class":"wikitable"})[0]
rows = table.findAll("tr")
csvFile = open(r"D:\Big_data\scrap_download\editors.csv", 'wt', encoding='utf-8') # need add encoding
writer = csv.writer(csvFile)
try:
for row in rows:
csvRow = []
for cell in row.findAll(['td', 'th']):
csvRow.append(cell.get_text())
writer.writerow(csvRow)
finally:
csvFile.close()
| [
"[email protected]"
] | |
7551344d8026f5fb3005279407d84d4d63ca3528 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/egtpserveroptions_0da9d0ef7484fcc78603ede124e9e586.py | 490e482bf5889cf5c552be904b732a2bf4472d79 | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,424 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class EgtpServerOptions(Base):
"""
The EgtpServerOptions class encapsulates a list of egtpServerOptions resources that are managed by the user.
A list of resources can be retrieved from the server using the EgtpServerOptions.find() method.
The list can be managed by using the EgtpServerOptions.add() and EgtpServerOptions.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'egtpServerOptions'
_SDM_ATT_MAP = {
'DistributeUserPlaneIps': 'distributeUserPlaneIps',
'EnableCreateBearerTFTHack': 'enableCreateBearerTFTHack',
'EnableDynamicAllocation': 'enableDynamicAllocation',
'ObjectId': 'objectId',
'PcpuLogLevel': 'pcpuLogLevel',
'PublishStatistics': 'publishStatistics',
}
def __init__(self, parent):
super(EgtpServerOptions, self).__init__(parent)
@property
def DistributeUserPlaneIps(self):
"""
Returns
-------
- bool: Distribute L7 user plane IP addresses across all assigned Ixia ports.
"""
return self._get_attribute(self._SDM_ATT_MAP['DistributeUserPlaneIps'])
@DistributeUserPlaneIps.setter
def DistributeUserPlaneIps(self, value):
self._set_attribute(self._SDM_ATT_MAP['DistributeUserPlaneIps'], value)
@property
def EnableCreateBearerTFTHack(self):
"""
Returns
-------
- bool: Send the first port received from the peer activity in the Create Bearer Request TFT.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableCreateBearerTFTHack'])
@EnableCreateBearerTFTHack.setter
def EnableCreateBearerTFTHack(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableCreateBearerTFTHack'], value)
@property
def EnableDynamicAllocation(self):
"""
Returns
-------
- bool: Enable dynamic allocation of UEs and sessions on PGW.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableDynamicAllocation'])
@EnableDynamicAllocation.setter
def EnableDynamicAllocation(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableDynamicAllocation'], value)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def PcpuLogLevel(self):
"""
Returns
-------
- str: PCPU log level
"""
return self._get_attribute(self._SDM_ATT_MAP['PcpuLogLevel'])
@PcpuLogLevel.setter
def PcpuLogLevel(self, value):
self._set_attribute(self._SDM_ATT_MAP['PcpuLogLevel'], value)
@property
def PublishStatistics(self):
"""
Returns
-------
- bool: Publish statistics for SGW.
"""
return self._get_attribute(self._SDM_ATT_MAP['PublishStatistics'])
@PublishStatistics.setter
def PublishStatistics(self, value):
self._set_attribute(self._SDM_ATT_MAP['PublishStatistics'], value)
def update(self, DistributeUserPlaneIps=None, EnableCreateBearerTFTHack=None, EnableDynamicAllocation=None, PcpuLogLevel=None, PublishStatistics=None):
"""Updates egtpServerOptions resource on the server.
Args
----
- DistributeUserPlaneIps (bool): Distribute L7 user plane IP addresses across all assigned Ixia ports.
- EnableCreateBearerTFTHack (bool): Send the first port received from the peer activity in the Create Bearer Request TFT.
- EnableDynamicAllocation (bool): Enable dynamic allocation of UEs and sessions on PGW.
- PcpuLogLevel (str): PCPU log level
- PublishStatistics (bool): Publish statistics for SGW.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, DistributeUserPlaneIps=None, EnableCreateBearerTFTHack=None, EnableDynamicAllocation=None, PcpuLogLevel=None, PublishStatistics=None):
"""Adds a new egtpServerOptions resource on the server and adds it to the container.
Args
----
- DistributeUserPlaneIps (bool): Distribute L7 user plane IP addresses across all assigned Ixia ports.
- EnableCreateBearerTFTHack (bool): Send the first port received from the peer activity in the Create Bearer Request TFT.
- EnableDynamicAllocation (bool): Enable dynamic allocation of UEs and sessions on PGW.
- PcpuLogLevel (str): PCPU log level
- PublishStatistics (bool): Publish statistics for SGW.
Returns
-------
- self: This instance with all currently retrieved egtpServerOptions resources using find and the newly added egtpServerOptions resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained egtpServerOptions resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, DistributeUserPlaneIps=None, EnableCreateBearerTFTHack=None, EnableDynamicAllocation=None, ObjectId=None, PcpuLogLevel=None, PublishStatistics=None):
"""Finds and retrieves egtpServerOptions resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve egtpServerOptions resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all egtpServerOptions resources from the server.
Args
----
- DistributeUserPlaneIps (bool): Distribute L7 user plane IP addresses across all assigned Ixia ports.
- EnableCreateBearerTFTHack (bool): Send the first port received from the peer activity in the Create Bearer Request TFT.
- EnableDynamicAllocation (bool): Enable dynamic allocation of UEs and sessions on PGW.
- ObjectId (str): Unique identifier for this object
- PcpuLogLevel (str): PCPU log level
- PublishStatistics (bool): Publish statistics for SGW.
Returns
-------
- self: This instance with matching egtpServerOptions resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of egtpServerOptions data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the egtpServerOptions resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum)
-----------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string)string
---------------------------------------
- Arg2 (str): Protocol class name to disable
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string)string
--------------------------------------
- Arg2 (str): Protocol class name to enable
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
| [
"[email protected]"
] | |
b494c0f5bfd4f8fe4d82481a52b55d9096e1edc9 | 93f47ba04fc18c4e537f0a48fe6232e2a89a4d30 | /examples/adspygoogle/dfp/v201408/creative_service/create_creative_from_template.py | c1a1728c927822b20cd10193664e4ff7cbe1cbf5 | [
"Apache-2.0"
] | permissive | jasonshih/googleads-python-legacy-lib | c56dc52a1dab28b9de461fd5db0fcd6020b84a04 | 510fad41ecf986fe15258af64b90f99a96dc5548 | refs/heads/master | 2021-04-30T22:12:12.900275 | 2015-03-06T15:35:21 | 2015-03-06T15:35:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a new template creative for a given advertiser.
To determine which companies are advertisers, run get_advertisers.py.
To determine which creative templates exist, run
get_all_creative_templates.py.
Tags: CreativeService.createCreative
"""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import base64
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Set id of the advertiser (company) that the creative will be assigned to.
ADVERTISER_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_id):
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201408')
# Use the image banner with optional third party tracking template.
creative_template_id = '10000680'
  # Open the image in binary mode before base64-encoding it.
  image_data = open(os.path.join(__file__[:__file__.rfind('/')], '..', 'data',
                                 'medium_rectangle.jpg'), 'rb').read()
image_data = base64.encodestring(image_data)
# Create creative from templates.
creative = {
'type': 'TemplateCreative',
'name': 'Template Creative #%s' % Utils.GetUniqueName(),
'advertiserId': advertiser_id,
'size': {'width': '300', 'height': '250'},
'creativeTemplateId': creative_template_id,
'creativeTemplateVariableValues': [
{
'type': 'AssetCreativeTemplateVariableValue',
'uniqueName': 'Imagefile',
'assetByteArray': image_data,
'fileName': 'image%s.jpg' % Utils.GetUniqueName()
},
{
'type': 'LongCreativeTemplateVariableValue',
'uniqueName': 'Imagewidth',
'value': '300'
},
{
'type': 'LongCreativeTemplateVariableValue',
'uniqueName': 'Imageheight',
'value': '250'
},
{
'type': 'UrlCreativeTemplateVariableValue',
'uniqueName': 'ClickthroughURL',
'value': 'www.google.com'
},
{
'type': 'StringCreativeTemplateVariableValue',
'uniqueName': 'Targetwindow',
'value': '_blank'
}
]
}
# Call service to create the creative.
creative = creative_service.CreateCreative(creative)[0]
# Display results.
print ('Template creative with id \'%s\', name \'%s\', and type \'%s\' was '
'created and can be previewed at %s.'
% (creative['id'], creative['name'], creative['Creative_Type'],
creative['previewUrl']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, ADVERTISER_ID)
| [
"[email protected]"
] | |
b147294d2e555d356a3bddf7311e9d144a4dc147 | 4f825250d1f3b00d4dff1601001bc72f9666f6b6 | /app/request.py | d75359b292c50cad0f63226d1a6931451ae18be7 | [] | no_license | MigotSharon/Watch-list | ee7c33fb8cc50fd6eedeaa76ee853fcd3389aadd | 139687ccbea73b7029a956a18fa7fe9b0d0458ba | refs/heads/main | 2023-01-02T00:36:08.861473 | 2020-10-24T16:54:31 | 2020-10-24T16:54:31 | 303,660,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | import urllib.request,json
from .models import Movie
# Getting api key
api_key = None
# Getting the movie base url
base_url = None
def configure_request(app):
global api_key,base_url
api_key = app.config['MOVIE_API_KEY']
base_url = app.config['MOVIE_API_BASE_URL']
def get_movies(category):
'''
    Function that gets the JSON response to our URL request
'''
get_movies_url = base_url.format(category,api_key)
with urllib.request.urlopen(get_movies_url) as url:
get_movies_data = url.read()
get_movies_response = json.loads(get_movies_data)
movie_results = None
if get_movies_response['results']:
movie_results_list = get_movies_response['results']
movie_results = process_results(movie_results_list)
return movie_results
def get_movie(id):
get_movie_details_url = base_url.format(id,api_key)
with urllib.request.urlopen(get_movie_details_url) as url:
movie_details_data = url.read()
movie_details_response = json.loads(movie_details_data)
movie_object = None
if movie_details_response:
id = movie_details_response.get('id')
title = movie_details_response.get('original_title')
overview = movie_details_response.get('overview')
poster = movie_details_response.get('poster_path')
vote_average = movie_details_response.get('vote_average')
vote_count = movie_details_response.get('vote_count')
movie_object = Movie(id,title,overview,poster,vote_average,vote_count)
return movie_object
def search_movie(movie_name):
search_movie_url = 'https://api.themoviedb.org/3/search/movie?api_key={}&query={}'.format(api_key,movie_name)
with urllib.request.urlopen(search_movie_url) as url:
search_movie_data = url.read()
search_movie_response = json.loads(search_movie_data)
search_movie_results = None
if search_movie_response['results']:
search_movie_list = search_movie_response['results']
search_movie_results = process_results(search_movie_list)
return search_movie_results
def process_results(movie_list):
'''
    Function that processes the movie results and transforms them into a list of Movie objects
Args:
movie_list: A list of dictionaries that contain movie details
    Returns:
movie_results: A list of movie objects
'''
movie_results = []
for movie_item in movie_list:
id = movie_item.get('id')
title = movie_item.get('original_title')
overview = movie_item.get('overview')
poster = movie_item.get('poster_path')
vote_average = movie_item.get('vote_average')
vote_count = movie_item.get('vote_count')
if poster:
movie_object = Movie(id,title,overview,poster,vote_average,vote_count)
movie_results.append(movie_object)
return movie_results
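# Illustrative usage sketch (assumes a Flask app whose config defines
# MOVIE_API_KEY and MOVIE_API_BASE_URL; the values shown are hypothetical):
#   configure_request(app)
#   trending = get_movies('popular')
#   movie = get_movie(trending[0].id)
#   results = search_movie('Inception')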
| [
"[email protected]"
] |