ext (string, 9 classes) | sha (string, length 40) | content (string, 3 to 1.04M chars)
---|---|---
py | 7df8c2369223f86ecf5c8e0734a87951bacfbdf0 | #!/usr/bin/env python
# @Time : 2019-06-01
# @Author : cayun
import sys
import argparse
import asyncio
from tawsocks.tcp_relay import TcpRelayHandler
from tawsocks.udp_relay import UdpRelayHandler
def _parse_args(args):
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-lp', '--local-port', dest='client_port', type=int,
required=True, help='Local listening port')
arg_parser.add_argument('-rh', '--remote-host', dest='server_host', required=True, help='Server host')
arg_parser.add_argument('-rp', '--remote-port', dest='server_port', type=int, required=True, help='Server port')
arg_parser.add_argument('-P', '--password', dest='password', required=True, help='Password')
return arg_parser.parse_args(args)
def main():
config = _parse_args(sys.argv[1:])
loop = asyncio.get_event_loop()
tcp_relay_handler = TcpRelayHandler(True, config, loop)
udp_relay_handler = UdpRelayHandler(True, config, loop)
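# Start both relay handlers concurrently, then keep the event loop running to serve traffic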
tasks = [
asyncio.ensure_future(tcp_relay_handler.start()),
asyncio.ensure_future(udp_relay_handler.start())
]
loop.run_until_complete(asyncio.wait(tasks))
try:
loop.run_forever()
finally:
loop.close()
if __name__ == '__main__':
main()
|
py | 7df8c39817d99eb94ca76ab648e5ecbf2eab3bb8 | from schemas.user import User |
py | 7df8c42eb9d4b24d5c3e6fae79f3f8b190b4cd83 | """Monte Carlo based sampling."""
import numpy as np
from equadratures.sampling_methods.sampling_template import Sampling
class Montecarlo(Sampling):
"""
The class defines a Montecarlo object. Samples are generated from the given distribution.
:param list parameters: A list of parameters, where each element of the list is an instance of the Parameter class.
:param Basis basis: An instance of the Basis class corresponding to the multi-index set used.
:param Correlations corr: An instance of Correlations object if input is correlated.
"""
def __init__(self, parameters, basis, corr=None, oversampling=7.0):
self.parameters = parameters
self.basis = basis
self.dimensions = len(self.parameters)
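# Number of samples scales with the basis cardinality by the oversampling factor (7x by default)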
number_of_samples = int(self.basis.cardinality * oversampling)
self.points = self._set_points(number_of_samples, corr)
super(Montecarlo, self).__init__(self.parameters, self.basis, self.points)
def _set_points(self, number_of_samples, corr=None):
"""
Sets the quadrature points and weights.
:param Sampling self:
An instance of the sampling class.
"""
if corr is not None:
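# Correlated inputs: let the Correlations object generate the joint samples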
self.points = corr.get_correlated_samples(number_of_samples)
else:
self.points = np.zeros((number_of_samples, self.dimensions))
for i in range(0, self.dimensions):
univariate_samples = self.parameters[i].get_samples(number_of_samples)
self.points[:, i] = univariate_samples[:]
return self.points
|
py | 7df8c50a0d630537c6b8ad4e59c0e27f1c90b78d | import os
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, override_settings
from .cases import StaticFilesTestCase
from .settings import TEST_ROOT
class TestFinders:
"""
Base finder test mixin.
On Windows, sometimes the case of the path we ask the finders for and the
path(s) they find can differ. Compare them using os.path.normcase() to
avoid false negatives.
"""
def test_find_first(self):
src, dst = self.find_first
found = self.finder.find(src)
self.assertEqual(os.path.normcase(found), os.path.normcase(dst))
def test_find_all(self):
src, dst = self.find_all
found = self.finder.find(src, all=True)
found = [os.path.normcase(f) for f in found]
dst = [os.path.normcase(d) for d in dst]
self.assertEqual(found, dst)
class TestFileSystemFinder(TestFinders, StaticFilesTestCase):
"""
Test FileSystemFinder.
"""
def setUp(self):
super(TestFileSystemFinder, self).setUp()
self.finder = finders.FileSystemFinder()
test_file_path = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
self.find_first = (os.path.join('test', 'file.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file.txt'), [test_file_path])
class TestAppDirectoriesFinder(TestFinders, StaticFilesTestCase):
"""
Test AppDirectoriesFinder.
"""
def setUp(self):
super(TestAppDirectoriesFinder, self).setUp()
self.finder = finders.AppDirectoriesFinder()
test_file_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
self.find_first = (os.path.join('test', 'file1.txt'), test_file_path)
self.find_all = (os.path.join('test', 'file1.txt'), [test_file_path])
class TestDefaultStorageFinder(TestFinders, StaticFilesTestCase):
"""
Test DefaultStorageFinder.
"""
def setUp(self):
super(TestDefaultStorageFinder, self).setUp()
self.finder = finders.DefaultStorageFinder(
storage=storage.StaticFilesStorage(location=settings.MEDIA_ROOT))
test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
self.find_first = ('media-file.txt', test_file_path)
self.find_all = ('media-file.txt', [test_file_path])
@override_settings(
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'documents')],
)
class TestMiscFinder(SimpleTestCase):
"""
A few misc finder tests.
"""
def test_get_finder(self):
self.assertIsInstance(finders.get_finder(
'django.contrib.staticfiles.finders.FileSystemFinder'),
finders.FileSystemFinder)
def test_get_finder_bad_classname(self):
with self.assertRaises(ImportError):
finders.get_finder('django.contrib.staticfiles.finders.FooBarFinder')
def test_get_finder_bad_module(self):
with self.assertRaises(ImportError):
finders.get_finder('foo.bar.FooBarFinder')
def test_cache(self):
finders.get_finder.cache_clear()
for n in range(10):
finders.get_finder('django.contrib.staticfiles.finders.FileSystemFinder')
cache_info = finders.get_finder.cache_info()
self.assertEqual(cache_info.hits, 9)
self.assertEqual(cache_info.currsize, 1)
def test_searched_locations(self):
finders.find('spam')
self.assertEqual(
finders.searched_locations,
[os.path.join(TEST_ROOT, 'project', 'documents')]
)
@override_settings(STATICFILES_DIRS='a string')
def test_non_tuple_raises_exception(self):
"""
We can't determine if STATICFILES_DIRS is set correctly just by
looking at the type, but we can determine if it's definitely wrong.
"""
with self.assertRaises(ImproperlyConfigured):
finders.FileSystemFinder()
@override_settings(MEDIA_ROOT='')
def test_location_empty(self):
with self.assertRaises(ImproperlyConfigured):
finders.DefaultStorageFinder()
|
py | 7df8c51f9378c82a672e16d4c1a90d468ab3b304 | from flask import g, abort, redirect, url_for
from app.instances import db
from app.models.Answer import Answer
from app.models.Notification import Notification, NotificationType
from app.notifications.send_notification import send_notification
from app.helpers.answers import get_outgolfed_answers
from app.models.Post import Post
from app.controllers import codepage
from app.models.Language import Language
from config import posts
def create_answer(post_id, code, commentary, lang_id=None, lang_name=None, encoding='utf-8'):
"""
Creates an answer on a given post. You may provide `lang_id` if you have a
known language, or `lang_name` instead if you have a non-native language.
Do NOT provide both. This will emit a notification too.
- `401` when not logged in
- `400` when a bad `lang_id` is provided.
"""
if g.user is None:
return abort(401)
normalized_encoding = codepage.get_normalized_encoding(encoding)
if normalized_encoding is None:
return abort(400)
# Ensure language exists
if lang_id is not None and not Language.exists(lang_id):
return abort(400)
new_answer = Answer(post_id=post_id, language_name=lang_name, language_id=lang_id,
binary_code=code.decode('utf8').encode(normalized_encoding), commentary=commentary,
encoding=normalized_encoding)
g.user.answers.append(new_answer)
post = Post.query.filter_by(id=post_id).first()
post.answers.append(new_answer)
db.session.add(new_answer)
db.session.commit()
# Dispatch notification to post owner. Only dispatch if the post
# user isn't the same as the answer owner.
if post.user_id != new_answer.user_id:
send_notification(Notification(
recipient=post.user,
target_id=new_answer.id,
sender=new_answer.user,
source_id=post_id,
notification_type=NotificationType.NEW_ANSWER
))
# Dispatch notifications to outgolfed users
outgolfed_answers = get_outgolfed_answers(new_answer)
for outgolfed_answer in outgolfed_answers:
send_notification(Notification(
sender=new_answer.user,
target_id=new_answer.id,
recipient=outgolfed_answer.user,
source_id=outgolfed_answer.id,
notification_type=NotificationType.OUTGOLFED
))
return url_for('get_answer', answer_id=new_answer.id)
def get_answers(post_id, page):
page = Answer.query. \
filter_by(post_id=post_id, deleted=False) \
.order_by(Answer.score.desc(), Answer.date_created.desc()) \
.paginate(page, per_page=posts['per_page'], error_out=False)
return page
def get_answer(answer_id):
answer = Answer.query.filter_by(id=answer_id).first()
return answer
def get_page(answer, order_by=(Answer.score.desc(), Answer.date_created.desc()),
per_page=posts.get('per_page', 10)):
if answer is None:
raise ValueError
# this is inefficient but it works
answers = Answer.query.filter_by(post_id=answer.post_id, deleted=False) \
.order_by(*order_by).all()
idx = [answer.id for answer in answers].index(answer.id)
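# Convert the answer's position in the ordered list to a 1-based page number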
page = (idx // per_page) + 1
return page
def revise_answer(answer_id, data):
answer = get_answer(answer_id)
if answer.user_id != g.user.id:
raise PermissionError
answer, revision = answer.revise(g.user, **data)
db.session.add(revision)
db.session.commit()
return answer
|
py | 7df8c5ef4fad2550d2fca5e5e012bd2062f7eb24 | # coding=utf-8
# Copyright 2021 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tracing operations.
This file collects common tracing operations, i.e., traces that each control the
execution of programs in a specific manner. For example, 'condition' traces a
program and fixes the value of random variables; and 'tape' traces the program
and records the executed random variables onto an ordered dictionary.
"""
import collections
import contextlib
from edward2.trace import trace
from edward2.trace import traceable
@contextlib.contextmanager
def condition(**model_kwargs):
"""Context manager for setting the values of random variables.
Args:
**model_kwargs: dict of str to Tensor. Keys are the names of random variables
in the model. Values are Tensors to set their corresponding value to.
Yields:
None.
#### Examples
`condition` is typically used for binding observations to random variables
in the model, or equivalently binding posterior samples to random variables
in the model. This lets one compute likelihoods or prior densities.
```python
import edward2 as ed
def probabilistic_matrix_factorization():
users = ed.Normal(0., 1., sample_shape=[5000, 128], name="users")
items = ed.Normal(0., 1., sample_shape=[7500, 128], name="items")
ratings = ed.Normal(loc=tf.matmul(users, items, transpose_b=True),
scale=0.1,
name="ratings")
return ratings
users = tf.zeros([5000, 128])
items = tf.zeros([7500, 128])
with ed.condition(users=users, items=items):
ratings = probabilistic_matrix_factorization()
# Compute the likelihood given latent user preferences and item attributes set
# to zero matrices, p(data | users=0, items=0).
ratings.distribution.log_prob(data)
```
"""
def _condition(f, *args, **kwargs):
"""Sets random variable values to its aligned value."""
name = kwargs.get("name")
if name in model_kwargs:
kwargs["value"] = model_kwargs[name]
return traceable(f)(*args, **kwargs)
with trace(_condition):
yield
@contextlib.contextmanager
def tape():
"""Context manager for recording traceable executions onto a tape.
Similar to `tf.GradientTape`, operations are recorded if they are executed
within this context manager. In addition, the operation must be registered
(decorated) as `ed.traceable`.
Yields:
tape: OrderedDict where operations are recorded in sequence. Keys are
the `name` keyword argument to the operation (typically, a random
variable's `name`) and values are the corresponding output of the
operation. If the operation has no name, it is not recorded.
#### Examples
```python
import edward2 as ed
def probabilistic_matrix_factorization():
users = ed.Normal(0., 1., sample_shape=[5000, 128], name="users")
items = ed.Normal(0., 1., sample_shape=[7500, 128], name="items")
ratings = ed.Normal(loc=tf.matmul(users, items, transpose_b=True),
scale=0.1,
name="ratings")
return ratings
with ed.tape() as model_tape:
ratings = probabilistic_matrix_factorization()
assert model_tape["users"].shape == (5000, 128)
assert model_tape["items"].shape == (7500, 128)
assert model_tape["ratings"] == ratings
```
"""
tape_data = collections.OrderedDict({})
def record(f, *args, **kwargs):
"""Records execution to a tape."""
name = kwargs.get("name")
output = traceable(f)(*args, **kwargs)
if name:
tape_data[name] = output
return output
with trace(record):
yield tape_data
|
py | 7df8c727d384a33d0e9d59c8457075745cf6dfde | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
log = logging.getLogger(__name__)
discord_edge = {
'color': {
'color': '#7289da'
},
'title': 'Discord Snowflake',
'label': '❄'
}
def parse_discord_snowflake(unfurl, node):
try:
snowflake = int(node.value)
# Some non-integer values can appear here (like @me) instead; abandon parsing
except ValueError:
return
try:
# Ref: https://discordapp.com/developers/docs/reference#snowflakes
timestamp = (snowflake >> 22) + 1420070400000
worker_id = (snowflake & 0x3E0000) >> 17
internal_process_id = (snowflake & 0x1F000) >> 12
increment = snowflake & 0xFFF
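# Snowflake layout (per the reference above): 42-bit ms timestamp | 5-bit worker ID | 5-bit process ID | 12-bit increment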
except Exception as e:
log.exception(f'Exception parsing Discord snowflake: {e}')
return
node.hover = 'Discord Snowflakes are unique, time-based IDs. ' \
'<a href="https://discordapp.com/developers/docs/reference#snowflakes" target="_blank">[ref]</a>'
unfurl.add_to_queue(
data_type='epoch-milliseconds', key=None, value=timestamp, label=f'Timestamp:\n{timestamp}',
hover='The first value in a Discord Snowflake is a timestamp associated with object creation',
parent_id=node.node_id, incoming_edge_config=discord_edge)
unfurl.add_to_queue(
data_type='integer', key=None, value=worker_id, label=f'Worker ID: {worker_id}',
hover='The second value in a Discord Snowflake is the internal worker ID',
parent_id=node.node_id, incoming_edge_config=discord_edge)
unfurl.add_to_queue(
data_type='integer', key=None, value=internal_process_id, label=f'Process ID: {internal_process_id}',
hover='The third value in a Discord Snowflake is the internal process ID',
parent_id=node.node_id, incoming_edge_config=discord_edge)
unfurl.add_to_queue(
data_type='integer', key=None, value=increment, label=f'Increment: {increment}',
hover='For every ID that is generated on that process, this number is incremented',
parent_id=node.node_id, incoming_edge_config=discord_edge)
def run(unfurl, node):
# Known patterns from main Discord site
discord_domains = ['discordapp.com', 'discordapp.net']
if any(discord_domain in unfurl.find_preceding_domain(node) for discord_domain in discord_domains):
if node.data_type == 'url.path.segment':
# Viewing a channel on a server
# Ex: https://discordapp.com/channels/427876741990711298/551531058039095296
if unfurl.check_sibling_nodes(node, data_type='url.path.segment', key=1, value='channels'):
if node.key == 2:
unfurl.add_to_queue(
data_type='description', key=None, value=None, label='Server ID',
hover='The timestamp in the associated Discord Snowflake is the time of Server creation',
parent_id=node.node_id, incoming_edge_config=discord_edge)
parse_discord_snowflake(unfurl, node)
elif node.key == 3:
unfurl.add_to_queue(
data_type='description', key=None, value=None, label='Channel ID',
hover='The timestamp in the associated Discord Snowflake is the time of Channel creation '
'on the given Server',
parent_id=node.node_id, incoming_edge_config=discord_edge)
parse_discord_snowflake(unfurl, node)
# Linking to a specific message
# Ex: https://discordapp.com/channels/427876741990711298/537760691302563843/643183730227281931
elif node.key == 4:
unfurl.add_to_queue(
data_type='description', key=None, value=None, label='Message ID',
hover='The timestamp in the associated Discord Snowflake is the time the Message was sent',
parent_id=node.node_id, incoming_edge_config=discord_edge)
parse_discord_snowflake(unfurl, node)
# File Attachment URLs
# Ex: https://cdn.discordapp.com/attachments/622136585277931532/626893414490832918/asdf.png
elif unfurl.check_sibling_nodes(node, data_type='url.path.segment', key=1, value='attachments'):
if node.key == 2:
unfurl.add_to_queue(
data_type='description', key=None, value=None, label='Channel ID',
hover='The timestamp in the associated Discord Snowflake is the time of channel creation',
parent_id=node.node_id, incoming_edge_config=discord_edge)
parse_discord_snowflake(unfurl, node)
elif node.key == 3:
unfurl.add_to_queue(
data_type='description', key=None, value=None, label='File ID',
hover='The timestamp in the associated Discord Snowflake is the time the attachment '
'was uploaded.',
parent_id=node.node_id, incoming_edge_config=discord_edge)
parse_discord_snowflake(unfurl, node)
elif node.key == 4:
unfurl.add_to_queue(
data_type='description', key=None, value=None, label='Attachment File Name',
parent_id=node.node_id, incoming_edge_config=discord_edge)
# Check if the node's value would correspond to a Snowflake with timestamp between 2015-02 and 2022-07
elif unfurl.check_if_int_between(node.value, 15000000000000000, 1000000000000000001):
parse_discord_snowflake(unfurl, node)
|
py | 7df8c72dd54e4198816ba3734c15d2b942ff4448 | # Copyright (c) 2012 The Cloudscaling Group, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova.scheduler import filters
class TypeAffinityFilter(filters.BaseHostFilter):
"""TypeAffinityFilter doesn't allow more then one VM type per host.
Note: this works best with ram_weight_multiplier
(spread) set to 1 (default).
"""
def host_passes(self, host_state, filter_properties):
"""Dynamically limits hosts to one instance type
Return False if host has any instance types other than the requested
type. Return True if all instance types match or if host is empty.
"""
instance_type = filter_properties.get('instance_type')
context = filter_properties['context'].elevated()
instances_other_type = db.instance_get_all_by_host_and_not_type(
context, host_state.host, instance_type['id'])
return len(instances_other_type) == 0
class AggregateTypeAffinityFilter(filters.BaseHostFilter):
"""AggregateTypeAffinityFilter limits instance_type by aggregate
return True if no instance_type key is set or if the aggregate metadata
key 'instance_type' has the instance_type name as a value
"""
# Aggregate data does not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
instance_type = filter_properties.get('instance_type')
context = filter_properties['context'].elevated()
metadata = db.aggregate_metadata_get_by_host(
context, host_state.host, key='instance_type')
return (len(metadata) == 0 or
instance_type['name'] in metadata['instance_type'])
|
py | 7df8c79a91651caf4dd14b5708daa43d5b0e918d | # Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
# Symbol Value
# I 1
# V 5
# X 10
# L 50
# C 100
# D 500
# M 1000
# For example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.
# Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
# I can be placed before V (5) and X (10) to make 4 and 9.
# X can be placed before L (50) and C (100) to make 40 and 90.
# C can be placed before D (500) and M (1000) to make 400 and 900.
# Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.
# Example 1:
# Input: "III"
# Output: 3
# Example 2:
# Input: "IV"
# Output: 4
# Example 3:
# Input: "IX"
# Output: 9
# Example 4:
# Input: "LVIII"
# Output: 58
# Explanation: L = 50, V= 5, III = 3.
# Example 5:
# Input: "MCMXCIV"
# Output: 1994
# Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
class Solution:
def romanToInt(self, s):
cache = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
count = 0
for i in range(len(s)):
# print(s[i])
if i == 0:
count += cache[s[i]]
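# Subtractive pair (e.g. IV): the previous symbol was already added, so add current - 2 * previous (IV: 5 - 2*1 = 3)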
elif s[i] == 'V' and s[i - 1] == 'I':
count += 3
elif s[i] == 'X' and s[i - 1] == 'I':
count += 8
elif s[i] == 'L' and s[i - 1] == 'X':
count += 30
elif s[i] == 'C' and s[i - 1] == 'X':
count += 80
elif s[i] == 'D' and s[i - 1] == 'C':
count += 300
elif s[i] == 'M' and s[i - 1] == 'C':
count += 800
else:
count += cache[s[i]]
return count
s = "MCMXCIV"
print(Solution().romanToInt(s))
|
py | 7df8c7a48989855e62ad4e5c96c8609ecbbc06dd | # 进行验证
import itertools
import hashlib
import time
import sys
url = "http://127.0.0.1/dz3.3/"
idstring = "vnY6nW"
uid = 2
sign = "af3b937d0132a06b"
f_name = 'd.txt'
str_list = "0123456789abcdef"
def dsign(authkey):
uurl = "{}member.php?mod=getpasswd&uid={}&id={}".format(url, uid, idstring)
url_md5 = hashlib.md5(uurl + authkey)
return url_md5.hexdigest()[:16]
def main():
cnt = 0
with open(f_name) as f:
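# Read candidate suffixes, strip the trailing newline, and de-duplicate while preserving order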
ranlist = [s[:-1] for s in f]
s_list = sorted(set(ranlist), key=ranlist.index)
print "[!] start running...."
s_time = time.time()
for s in s_list:
# itertools.product returns a one-shot iterator, so rebuild it for every dictionary suffix
r_list = itertools.product(str_list, repeat=6)
for j in r_list:
prefix = "".join(j)
authkey = prefix + s
# print authkey
# time.sleep(1)
sys.stdout.flush()
if dsign(authkey) == sign:
print "[*] found used time: " + str(time.time() - s_time)
return "[*] authkey found: " + authkey
cnt +=1
print cnt
print main()
|
py | 7df8c845e802b7c91e01bb1af5c461792b03621d | # Copyright 2015-2018 Yelp
# Copyright 2019 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal utilities to make Python work for 2.7+ and 3.4+
Strategies for making `mrjob` work across Python versions:
Bytes vs. Unicode
-----------------
It's tempting to use `from __future__ import unicode_literals` and require
that all non-byte strings be unicode. But that doesn't really make sense for
Python 2, where str (bytes) and unicode can be used interchangeably.
So really our string datatypes fall into two categories, bytes, and
"strings", which means either ``unicode``\\s or ASCII ``str``\\s in
Python 2, and ``str``\\s (i.e. unicode) in Python 3.
These things should always be bytes:
- input data files
- use ``'b'`` when opening files: ``open(..., 'rb')``
- read data from ``sys.stdin.buffer`` in Python 3, not ``sys.stdin``
- data encoded by protocols
- data from subprocesses (this already happens by default)
- log files parsed by mrjob
- file content from our filesystem interfaces.
Instead of using ``StringIO`` to deal with these, use ``io.BytesIO``.
Note that both Python 2.6+ and Python 3.3+ have the ``bytes`` type and
``b''`` constants built-in.
These things should always be strings:
- streams that you print() to (e.g. ``sys.stdout`` if you mock it out)
- streams that you log to
- paths on filesystem
- URIs
- arguments to commands
- option names
- Hadoop counter names and groups
- Hadoop status messages
- anything else we parse out of log files
These things are strings because it makes for simpler code:
- contents of config files
- contents of scripts output by mrjob (e.g. the setup wrapper script)
- contents of empty files
Use the ``StringIO`` from this module to deal with strings (it's
``StringIO.StringIO`` in Python 2 and ``io.StringIO`` in Python 3).
Please use ``%`` for format strings and not ``format()``, which is much more
picky about mixing unicode and bytes.
We don't provide a ``unicode`` type:
- Use ``isinstance(..., string_types)`` to check if something is a string
- Use ``not isinstance(..., bytes)`` to check if a string is Unicode
- To convert ``bytes`` to ``unicode``, use ``.decode('utf_8')``.
- Python 3.3+ has ``u''`` literals; please use sparingly
If you need to convert bytes of unknown encoding to a string (e.g. to
``print()`` or log them), use ``to_unicode()`` from this module.
Iterables
---------
Using ``.iteritems()`` or ``.itervalues()`` in Python 2 to iterate over a
dictionary when you don't need a list is best practice, but it's also (in most
cases) an over-optimization. We'd prefer clean code; just use ``.items()``
and ``.values()``.
If you *do* have concerns about memory usage, ``for k in some_dict`` does not
create a list in either version of Python.
Same goes for ``xrange``; plain-old ``range`` is almost always fine.
Miscellany
----------
We provide an ``integer_types`` tuple so you can check if something is an
integer: ``isinstance(..., integer_types)``.
Any standard library function that deals with URLs (e.g. ``urlparse()``) should
probably be imported from this module.
You *usually* want to do ``from __future__ import print_function`` in modules
where you use ``print()``. ``print(...)`` works fine, but
``print(..., file=...)`` doesn't, and ``print()`` prints ``()`` on Python 2.
You shouldn't need any other ``__future__`` imports.
"""
import sys
# use this to check if we're in Python 2
PY2 = (sys.version_info[0] == 2)
# ``string_types``, for ``isinstance(..., string_types)``
if PY2:
string_types = (basestring,)
else:
string_types = (str,)
string_types
# ``integer_types``, for ``isinstance(..., integer_types)``
if PY2:
integer_types = (int, long)
else:
integer_types = (int,)
integer_types
# ``StringIO``. Useful for mocking out ``sys.stdout``, etc.
if PY2:
from StringIO import StringIO
else:
from io import StringIO
StringIO # quiet, pyflakes
# ``xrange``. Plain old ``range`` is almost always fine
if PY2:
xrange = xrange
else:
xrange = range
xrange # quiet, pyflakes
# urllib stuff
# in most cases you should use ``mrjob.parse.urlparse()``
if PY2:
from urlparse import ParseResult
from urllib import pathname2url
from urlparse import urljoin
from urllib2 import urlopen
from urlparse import urlparse
else:
from urllib.parse import ParseResult
from urllib.request import pathname2url
from urllib.parse import urljoin
from urllib.request import urlopen
from urllib.parse import urlparse
ParseResult
pathname2url
urljoin
urlopen
urlparse
def to_unicode(s):
"""Convert ``bytes`` to unicode.
Use this if you need to ``print()`` or log bytes of an unknown encoding,
or to parse strings out of bytes of unknown encoding (e.g. a log file).
This hopes that your bytes are UTF-8 decodable, but if not, falls back
to latin-1, which always works.
"""
if isinstance(s, bytes):
try:
return s.decode('utf_8')
except UnicodeDecodeError:
return s.decode('latin_1')
elif isinstance(s, string_types): # e.g. is unicode
return s
else:
raise TypeError
|
py | 7df8c936bdc024da27ce618c6069a7118927edaa | /home/runner/.cache/pip/pool/c6/4f/6c/4418d4c8d4c7b3f4ef11679b556b3519f2cf376d3c333a525ebf4e93f0 |
py | 7df8ca1a17631c5c18e4aa60d6a53a32b03de5ff | # -*- coding: utf-8 -*-
"""Global settings for the project"""
import os.path
from tornado.options import define
define("port", default=8000, help="run on the given port", type=int)
define("config", default=None, help="tornado config file")
define("debug", default=False, help="debug mode")
__BASE_PACKAGE__ = "tornado_test"
settings = {}
settings["debug"] = True
settings["cookie_secret"] = "gB9jYwVv0aodH51judoGwroWP"
settings["login_url"] = "/login"
settings["static_path"] = os.path.join(os.path.dirname(__file__), __BASE_PACKAGE__, "static")
settings["template_path"] = os.path.join(os.path.dirname(__file__), __BASE_PACKAGE__, "templates")
settings["xsrf_cookies"] = False
|
py | 7df8ca4c570295bb9b2df6d8571218a41b90664e | # Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The following code was copied from the Django project, and only lightly
# modified. Please adhere to the above copyright and license for the code
# in this file.
# Note: Nothing is covered here because this file is imported before nose and
# coverage take over.. and so its a false positive that nothing is covered.
import datetime # pragma: nocover
import os # pragma: nocover
import platform # pragma: nocover
import subprocess # pragma: nocover
import sys # pragma: nocover
from cement.core.backend import VERSION # pragma: nocover
def get_version(version=VERSION): # pragma: nocover
"Returns a PEP 386-compliant version number from VERSION."
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
# We want to explicitly include all three version/release numbers
# parts = 2 if version[2] == 0 else 3
parts = 3
main = ".".join(str(x) for x in version[:parts])
sub = ""
if version[3] == "alpha" and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = ".dev%s" % git_changeset
elif version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "c"}
sub = mapping[version[3]] + str(version[4])
return main + sub
def get_version_banner():
cement_ver = get_version()
python_ver = ".".join([str(x) for x in sys.version_info[0:3]])
plat = platform.platform()
banner = (
"Cement Framework %s\n" % cement_ver
+ "Python %s\n" % python_ver
+ "Platform %s" % plat
)
return banner
def get_git_changeset(): # pragma: nocover
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very
unlikely, so it's sufficient for generating the development version
numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen(
"git log --pretty=format:%ct --quiet -1 HEAD",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
cwd=repo_dir,
universal_newlines=True,
)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError: # pragma: nocover
return None # pragma: nocover
return timestamp.strftime("%Y%m%d%H%M%S")
|
py | 7df8ca54e453d171c8c8e14a0066328c6ab65338 |
'''
This interface uses YOLOX-Darknet to infer the bounding-box information for SPIN. It generates a JSON file
that stores the bounding-box information, or you can call this interface directly to get the bounding boxes as a Python list.
You need to download this file https://pan.baidu.com/s/1WOGfN284hKZnAjYZcnLfHQ using the password af1f and put the file into the root folder.
You also need to set up the YOLOX environment from https://github.com/Megvii-BaseDetection/YOLOX.
'''
import os
import time
import cv2
import torch
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import postprocess
import json
IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]
#collect the image files under the given path
def get_image_list(path):
image_names = []
for maindir, subdir, file_name_list in os.walk(path):
for filename in file_name_list:
apath = os.path.join(maindir, filename)
ext = os.path.splitext(apath)[1]
if ext in IMAGE_EXT:
image_names.append(apath)
return image_names
#this class is for inference
class Predictor(object):
def __init__(
self,
model,
cls_names=COCO_CLASSES,
trt_file=None,
decoder=None,
device="cpu",
):
self.model = model
self.cls_names = cls_names
self.decoder = decoder
self.num_classes = 80
self.confthre = 0.5
self.nmsthre = 0.65
self.test_size = 640
self.device = device
self.preproc = ValTransform(legacy=False)
if trt_file is not None:
from torch2trt import TRTModule
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(trt_file))
x = torch.ones(1, 3, self.test_size, self.test_size).cuda()
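# Presumably a warm-up forward pass before swapping in the TensorRT module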
self.model(x)
self.model = model_trt
#This is the main inference step. We need to resize the image before feeding it into the YOLOX model
def inference(self, img):
img_info = {"id": 0}
if isinstance(img, str):
img_info["file_name"] = os.path.basename(img)
img = cv2.imread(img)
else:
img_info["file_name"] = None
height, width = img.shape[:2]
img_info["height"] = height
img_info["width"] = width
img_info["raw_img"] = img
ratio = min(self.test_size / img.shape[0], self.test_size / img.shape[1])
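# ratio maps the resized network input back to the original image scale (boxes are divided by it later)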
img_info["ratio"] = ratio
img, _ = self.preproc(img, None, [self.test_size, self.test_size])
img = torch.from_numpy(img).unsqueeze(0)
img = img.float()
if self.device == "gpu":
img = img.cuda()
with torch.no_grad():
t0 = time.time()
outputs = self.model(img)
if self.decoder is not None:
outputs = self.decoder(outputs, dtype=outputs.type())
outputs = postprocess(
outputs, self.num_classes, self.confthre,
self.nmsthre, class_agnostic=True
)
return outputs, img_info
#After we get the detection results, we filter them: we keep only the person class (cls == 0)
#and high-confidence detections to return and store.
def visual(self, output, img_info, cls_conf=0.5):
ratio = img_info["ratio"]
img = img_info["raw_img"]
if output is None:
return img
output = output.cpu()
bboxes = output[:, 0:4]
# preprocessing: resize
bboxes /= ratio
cls = output[:, 6]
scores = output[:, 4] * output[:, 5]
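# Detection score = objectness confidence * class confidence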
bboxall = []
for i in range(len(bboxes)):
score = scores[i]
if score < cls_conf:
continue
if cls[i] != 0:
continue
box = bboxes[i]
bx = box.tolist()
#convert [x_min, y_min, x_max, y_max] into the form [x_min, y_min, width, height]
bx[2] = bx[2] - bx[0]
bx[3] = bx[3] - bx[1]
bboxall.append(bx)
return bboxall
# This function uses the methods above to run the whole inference pipeline
def image_inference(predictor, path, current_time):
#get whole images list
if os.path.isdir(path):
files = get_image_list(path)
else:
files = [path]
files.sort()
#Iterative processing of all pictures
for image_name in files:
outputs, img_info = predictor.inference(image_name)
bboxes = predictor.visual(outputs[0], img_info, predictor.confthre)
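# Note that this returns after the first image; the default caller passes a single image path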
return bboxes
# This is the method that can be imported from other files; it stores the JSON file and returns the bounding-box result
def get_bboxes(path = 'test_img2.png', pretrained_model = 'yolox_darknet.pth', exp = get_exp('yolov3.py', None)):
model_name = 'yolov3'
model = exp.get_model()
model.cuda()
model.eval()
inference_model = torch.load(pretrained_model, map_location="cpu")
# load the model state dict
model.load_state_dict(inference_model["model"])
#logger.info("loaded checkpoint done.")
trt_file = None
decoder = None
predictor = Predictor(model, COCO_CLASSES, trt_file, decoder, 'gpu')
current_time = time.localtime()
bboxes = image_inference(predictor, path, current_time)
#we use json package to store the bounding box information
dic_bbox = {'bbox': bboxes}
with open("bbox.json", "w") as f:
json.dump(dic_bbox, f)
return bboxes
if __name__ == "__main__":
get_bboxes() |
py | 7df8cb7d9851b696115e21599317e3c3cdccbc1f | import warnings
import json
from typing import Any, List
import requests
from prefect import Task
from prefect.client import Secret
from prefect.utilities.tasks import defaults_from_attrs
class GetRepoInfo(Task):
"""
Task for retrieving GitHub repository information using the v3 version of the GitHub REST API.
Args:
- repo (str, optional): the name of the repository to open the issue in; must be provided in the
form `organization/repo_name` or `user/repo_name`; can also be provided to the `run` method
- info_keys (List[str], optional): a list of repo attributes to pull (e.g., `["stargazers_count", "subscribers_count"]`).
A full list of available keys can be found in the official [GitHub documentation](https://developer.github.com/v3/repos/)
- token_secret (str, optional, DEPRECATED): the name of the Prefect Secret containing your GitHub Access Token;
- **kwargs (Any, optional): additional keyword arguments to pass to the standard Task init method
"""
def __init__(
self,
repo: str = None,
info_keys: List[str] = None,
token_secret: str = None,
**kwargs: Any
):
self.repo = repo
self.info_keys = info_keys or []
if token_secret is not None:
warnings.warn(
"The `token` argument is deprecated. Use a `Secret` task "
"to pass the credentials value at runtime instead.",
UserWarning,
)
self.token_secret = token_secret
super().__init__(**kwargs)
@defaults_from_attrs("repo", "info_keys")
def run(
self, repo: str = None, info_keys: List[str] = None, token: str = None
) -> None:
"""
Run method for this Task. Invoked by calling this Task after initialization within a Flow context,
or by using `Task.bind`.
Args:
- repo (str, optional): the name of the repository to open the issue in; must be provided in the
form `organization/repo_name`; defaults to the one provided at initialization
- info_keys (List[str], optional): a list of repo attributes to pull (e.g., `["stargazers_count", "subscribers_count"]`).
A full list of available keys can be found in the official [GitHub documentation](https://developer.github.com/v3/repos/)
- token (str): a GitHub access token
Raises:
- ValueError: if a `repo` was never provided
- HTTPError: if the GET request returns a non-200 status code
Returns:
- dict: dictionary of the requested information
"""
if repo is None:
raise ValueError("A GitHub repository must be provided.")
## prepare the request
if token is None:
warnings.warn(
"The `token` argument is deprecated. Use a `Secret` task "
"to pass the credentials value at runtime instead.",
UserWarning,
)
token = Secret(self.token_secret).get()
url = "https://api.github.com/repos/{}".format(repo)
headers = {
"AUTHORIZATION": "token {}".format(token),
"Accept": "application/vnd.github.v3+json",
}
## send the request
resp = requests.get(url, headers=headers)
resp.raise_for_status()
data = resp.json()
return {key: data[key] for key in info_keys}
class CreateBranch(Task):
"""
Task for creating new branches using the v3 version of the GitHub REST API.
Args:
- repo (str, optional): the name of the repository to create the branch in; must be provided in the
form `organization/repo_name` or `user/repo_name`; can also be provided to the `run` method
- base (str, optional): the name of the branch you want to branch off; can also
be provided to the `run` method. Defaults to "master".
- branch_name (str, optional): the name of the new branch; can also be provided to the `run` method
- token_secret (str, optional, DEPRECATED): the name of the Prefect Secret containing your GitHub Access Token;
- **kwargs (Any, optional): additional keyword arguments to pass to the standard Task init method
"""
def __init__(
self,
repo: str = None,
base: str = "master",
branch_name: str = None,
token_secret: str = None,
**kwargs: Any
):
self.repo = repo
self.base = base
self.branch_name = branch_name
if token_secret is not None:
warnings.warn(
"The `token` argument is deprecated. Use a `Secret` task "
"to pass the credentials value at runtime instead.",
UserWarning,
)
self.token_secret = token_secret
super().__init__(**kwargs)
@defaults_from_attrs("repo", "base", "branch_name")
def run(
self,
repo: str = None,
base: str = None,
branch_name: str = None,
token: str = None,
) -> dict:
"""
Run method for this Task. Invoked by calling this Task after initialization within a Flow context,
or by using `Task.bind`.
Args:
- repo (str, optional): the name of the repository to open the issue in; must be provided in the
form `organization/repo_name`; defaults to the one provided at initialization
- base (str, optional): the name of the branch you want to branch off; if not provided here,
defaults to the one set at initialization
- branch_name (str, optional): the name of the new branch; if not provided here, defaults to
the one set at initialization
- token (str): a GitHub access token
Raises:
- ValueError: if a `repo` or `branch_name` was never provided, or if the base branch wasn't found
- HTTPError: if the GET request returns a non-200 status code
Returns:
- dict: dictionary of the response (includes commit hash, etc.)
"""
if branch_name is None:
raise ValueError("A branch name must be provided.")
if repo is None:
raise ValueError("A GitHub repository must be provided.")
## prepare the request
if token is None:
warnings.warn(
"The `token` argument is deprecated. Use a `Secret` task "
"to pass the credentials value at runtime instead.",
UserWarning,
)
token = Secret(self.token_secret).get()
url = "https://api.github.com/repos/{}/git/refs".format(repo)
headers = {
"AUTHORIZATION": "token {}".format(token),
"Accept": "application/vnd.github.v3+json",
}
## gather branch information
resp = requests.get(url + "/heads", headers=headers)
resp.raise_for_status()
branch_data = resp.json()
commit_sha = None
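# Find the commit SHA that the base branch currently points to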
for branch in branch_data:
if branch.get("ref") == "refs/heads/{}".format(base):
commit_sha = branch.get("object", {}).get("sha")
break
if commit_sha is None:
raise ValueError("Base branch {} not found.".format(base))
## create new branch
new_branch = {"ref": "refs/heads/{}".format(branch_name), "sha": commit_sha}
resp = requests.post(url, headers=headers, json=new_branch)
resp.raise_for_status()
return resp.json()
|
py | 7df8cbc17febc53af5c05b61030d03b30d4fa8ce | # -*- coding: utf-8 -*-
from hostlist.models import HostList, Dzhuser
import logging
log = logging.getLogger('dzhops')
def clearUpMinionKyes(idlist, dc, eg):
'''
Filter the minion ids and return the ones matching the given key state, data center, and engineer.
:param idlist: minion ids in the acp/pre/rej key state (accepted, pending, and rejected respectively)
:param dc: data center abbreviation (English)
:param eg: maintainer (engineer) username, English abbreviation
:return: list of filtered minion ids
'''
if dc == 'DC_ALL' and eg == 'EG_ALL':
result = idlist
elif dc != 'DC_ALL' and eg == 'EG_ALL':
result = []
for id in idlist:
id_dcen = id.split("_")
if id_dcen[3] == dc:
result.append(id)
elif dc == 'DC_ALL' and eg != 'EG_ALL':
eg_id_list = []
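# Collect the minion ids maintained by this engineer, then intersect with the key-state list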
engi_result = Dzhuser.objects.get(username=eg)
data = HostList.objects.filter(engineer=engi_result.engineer)
for row in data:
eg_id_list.append(row.minionid)
result = list(set(idlist).intersection(set(eg_id_list)))
elif dc != 'DC_ALL' and eg != 'EG_ALL':
dc_id_list = []
eg_id_list = []
for id in idlist:
id_dcen = id.split("_")
if id_dcen[3] == dc:
dc_id_list.append(id)
engi_result = Dzhuser.objects.get(username=eg)
data = HostList.objects.filter(engineer=engi_result.engineer)
for row in data:
eg_id_list.append(row.minionid)
result = list(set(dc_id_list).intersection(set(eg_id_list)))
else:
result = []
log.error("Unexpected execution here.")
return result |
py | 7df8ccb58396cdd77e97a985c938302218fa4588 | """
Unit test for `start-api` CLI
"""
from unittest import TestCase
from unittest.mock import patch, Mock
from parameterized import parameterized
from samcli.commands.local.start_api.cli import do_cli as start_api_cli
from samcli.commands.local.lib.exceptions import NoApisDefined, InvalidIntermediateImageError
from samcli.lib.providers.exceptions import InvalidLayerReference
from samcli.commands.exceptions import UserException
from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException
from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError
from samcli.local.docker.exceptions import ContainerNotStartableException
from samcli.local.docker.lambda_debug_settings import DebuggingNotSupported
class TestCli(TestCase):
def setUp(self):
self.template = "template"
self.env_vars = "env-vars"
self.debug_ports = [123]
self.debug_args = "args"
self.debugger_path = "/test/path"
self.container_env_vars = "container-env-vars"
self.docker_volume_basedir = "basedir"
self.docker_network = "network"
self.log_file = "logfile"
self.skip_pull_image = True
self.parameter_overrides = {}
self.layer_cache_basedir = "/some/layers/path"
self.force_image_build = True
self.shutdown = True
self.region_name = "region"
self.profile = "profile"
self.warm_containers = None
self.debug_function = None
self.ctx_mock = Mock()
self.ctx_mock.region = self.region_name
self.ctx_mock.profile = self.profile
self.host = "host"
self.port = 123
self.static_dir = "staticdir"
self.iac = Mock()
self.project = Mock()
@patch("samcli.commands.local.cli_common.invoke_context.InvokeContext")
@patch("samcli.commands.local.lib.local_api_service.LocalApiService")
def test_cli_must_setup_context_and_start_service(self, local_api_service_mock, invoke_context_mock):
# Mock the __enter__ method to return a object inside a context manager
context_mock = Mock()
invoke_context_mock.return_value.__enter__.return_value = context_mock
service_mock = Mock()
local_api_service_mock.return_value = service_mock
self.warm_containers = None
self.debug_function = None
self.call_cli()
invoke_context_mock.assert_called_with(
template_file=self.template,
function_identifier=None,
env_vars_file=self.env_vars,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
debug_ports=self.debug_ports,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
container_env_vars_file=self.container_env_vars,
parameter_overrides=self.parameter_overrides,
layer_cache_basedir=self.layer_cache_basedir,
force_image_build=self.force_image_build,
aws_region=self.region_name,
aws_profile=self.profile,
warm_container_initialization_mode=self.warm_containers,
debug_function=self.debug_function,
shutdown=self.shutdown,
iac=self.iac,
project=self.project,
)
local_api_service_mock.assert_called_with(
lambda_invoke_context=context_mock, port=self.port, host=self.host, static_dir=self.static_dir
)
service_mock.start.assert_called_with()
@patch("samcli.commands.local.cli_common.invoke_context.InvokeContext")
@patch("samcli.commands.local.lib.local_api_service.LocalApiService")
def test_must_raise_if_no_api_defined(self, local_api_service_mock, invoke_context_mock):
# Mock the __enter__ method to return a object inside a context manager
context_mock = Mock()
invoke_context_mock.return_value.__enter__.return_value = context_mock
service_mock = Mock()
local_api_service_mock.return_value = service_mock
service_mock.start.side_effect = NoApisDefined("no apis")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "Template does not have any APIs connected to Lambda functions"
self.assertEqual(msg, expected)
@parameterized.expand(
[
(InvalidSamDocumentException("bad template"), "bad template"),
(
InvalidLayerReference(),
"Layer References need to be of type " "'AWS::Serverless::LayerVersion' or 'AWS::Lambda::LayerVersion'",
),
(DebuggingNotSupported("Debugging not supported"), "Debugging not supported"),
]
)
@patch("samcli.commands.local.cli_common.invoke_context.InvokeContext")
def test_must_raise_user_exception_on_invalid_sam_template(
self, exception_to_raise, exception_message, invoke_context_mock
):
invoke_context_mock.side_effect = exception_to_raise
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = exception_message
self.assertEqual(msg, expected)
@patch("samcli.commands.local.cli_common.invoke_context.InvokeContext")
def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock):
invoke_context_mock.side_effect = OverridesNotWellDefinedError("bad env vars")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "bad env vars"
self.assertEqual(msg, expected)
@patch("samcli.commands.local.cli_common.invoke_context.InvokeContext")
def test_must_raise_user_exception_on_no_free_ports(self, invoke_context_mock):
invoke_context_mock.side_effect = ContainerNotStartableException("no free ports on host to bind with container")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "no free ports on host to bind with container"
self.assertEqual(msg, expected)
@patch("samcli.commands.local.cli_common.invoke_context.InvokeContext")
def test_must_raise_user_exception_on_invalid_imageuri(self, invoke_context_mock):
invoke_context_mock.side_effect = InvalidIntermediateImageError("invalid imageuri")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "invalid imageuri"
self.assertEqual(msg, expected)
def call_cli(self):
start_api_cli(
ctx=self.ctx_mock,
host=self.host,
port=self.port,
static_dir=self.static_dir,
template=self.template,
env_vars=self.env_vars,
debug_port=self.debug_ports,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
container_env_vars=self.container_env_vars,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
parameter_overrides=self.parameter_overrides,
layer_cache_basedir=self.layer_cache_basedir,
force_image_build=self.force_image_build,
warm_containers=self.warm_containers,
debug_function=self.debug_function,
shutdown=self.shutdown,
project_type="CFN",
iac=self.iac,
project=self.project,
)
|
py | 7df8cceb59a2bcfb8715aedd4215b42ada0971fd | import bpy
import math
import numpy as np
#=== add scripts dir to path
import sys
import os
#=== define path of scripts dir
libDir=bpy.path.abspath("//../../scripts/") # version1: relative to current file
#libDir="/where/you/placed/blenderCadCam/scripts/" #version 2: usa an absolute path
if not libDir in sys.path:
sys.path.append(libDir)
#=== add local dir to path
dir = os.path.dirname(bpy.data.filepath)
if not dir in sys.path:
sys.path.append(dir)
#print(sys.path)
#=== Blender imports a module only once even if the file changes; if we edit it outside, we need to force a reload
from importlib import reload
#=== import scripts modules
import wingLib
reload(wingLib)
#===================================================================================================
#===
#===================================================================================================
if 0:
import ipdb
ipdb.set_trace()
ipdb.set_trace(context=5)
if 1:
#=== delete all but camera and lamp to start from a clean scene collection
wingLib.deleteAllButNames(['outl','outl2','myWing1','myWing2'])
#===================================================================================================
#=== basic geometry definition
#===================================================================================================
foilwidth=1.6
chAdditive=0.06 #we add this as a constant to the chordlength to generate a (towards the tip) increasing over-elliptic ch
chordlength=0.17
nSec=41*2
halfSpan=foilwidth/2.0
if 1:
#=============================================================
#=== prepare profiles
#=============================================================
f=libDir+'/AG25_resampled.dat'
cAG25, leAG25=wingLib.foilImport(f,'auto')
f=libDir+'/AG26_resampled.dat'
cAG26, leAG26=wingLib.foilImport(f,'auto')
f=libDir+'/AG14_resampled.dat'
cAG14, leAG14=wingLib.foilImport(f,'auto')
#f=libDir+'/AG27_resampled.dat'
#cAG27, leAG27=wingLib.foilImport(f,'auto')
#=== downsampling of the root profile - we don't need too fine a resolution for the CAM model
nPoints=100
cAG25r, leAG25r=wingLib.foildDataReduceToNpoints(cAG25,nPoints, True) #True: save trailing edge (keep 1st and last point)
pAG25r=wingLib.curveBezierFromPoints(cAG25r,'PAG25r',True,True)
#=== get & interpolate the outer profile on the root (necessary for morphing)
pAG26=wingLib.curveBezierFromPoints(cAG26,'PAG26',True,True)
pAG14=wingLib.curveBezierFromPoints(cAG14,'PAG14',True,True)
#pAG27=wingLib.curveBezierFromPoints(cAG27,'PAG27',True,True)
cAG14r=wingLib.interpolateBezier2on1(pAG25r, pAG14, leAG25r, leAG14, 40)
cAG26r=wingLib.interpolateBezier2on1(pAG25r, pAG26, leAG25r, leAG26, 40)
#cAG27_=wingLib.interpolateBezier2on1(pAG25, pAG27, leAG25, leAG27, 40)
#=== plot for check:
if 0:
pAG25=wingLib.curveBezierFromPoints(cAG25,'PAG25',True,True)
pAG14r=wingLib.curveBezierFromPoints(cAG14r,'PG14r',True,True)
pAG26r=wingLib.curveBezierFromPoints(cAG26r,'ProfileAG26r',True,True)
#=== clean-up
if 1:
wingLib.deleteByName('PAG25r')
wingLib.deleteByName('PAG14')
wingLib.deleteByName('PAG26')
# compile the coord dict for easy access
cDict={
"AG25": cAG25r,
"AG26": cAG26r,
"AG14": cAG14r,
#"AG27": cAG27_,
}
#=============================================================
#=== prepare base sections settings
#=============================================================
baseSectionsL=[]
baseSectionsL.append({"p":'AG25', "s":0.00*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG25', "s":0.05*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG26', "s":0.40*halfSpan, "tA":0.0, "tMorph":True, "morphT":'lCh'})
baseSectionsL.append({"p":'AG14', "s":0.95*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'})
baseSectionsL.append({"p":'AG14', "s":1.00*halfSpan, "tA":0.0, "tMorph":False, "morphT":'lCh'})
#=============================================================
#=== chordlength distribution
#=============================================================
#=== define section-wise ch extension
dChL=[]
dChL.append({"s": 0.00*halfSpan, "dy": chAdditive})
dChL.append({"s": 0.40*halfSpan, "dy": chAdditive})
dChL.append({"s": 0.95*halfSpan, "dy": chAdditive})
dChL.append({"s": 1.00*halfSpan, "dy": chAdditive})
#=== ellipse parameters
a=halfSpan
b=(chordlength-chAdditive)/2.0
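        # Note (assumption): ellipseParamV below returns points on the ellipse y(x) = b*sqrt(1 - (x/a)**2),
        # so the bare elliptic chord is ch(x) = 2*y(x); the constant chAdditive from dChL is added on top
        # later via chordExtensionLinear to make the distribution over-elliptic.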
#=== get/init the wing Data object
# for morphed profiles, le is the same
wingData=wingLib.WingFromSections(cDict, leAG25r, baseSectionsL, halfSpan, a, b, dChL)
if 1:
        #=== get data for individual CAM sections
# get basic ellipse arc points in 1st and 2nd quadrant (the unshifted leading edge) and chordlength
x,y=wingLib.ellipseParamV(a,b,nSec)
        ch=np.multiply(y,2.0) # elliptic chordlength = 2*y (y is the half-chord from the ellipse)
        #=== adapted chordlength
ch=wingLib.chordExtensionLinear(ch, x, dChL)
#shellthickness
#thickness=1.0
#=== set 2d profile to be used (gives us a function reference used later)
func4coords=wingData.coords
quality='none'
#plot Re(span)
if 0:
v=8.0# determined from stall velocity, see e.g. https://alexpgh.github.io/foss-toolchain-mpcnc/blenderKissSlope/#wing-loading-and-re
v2=9.7
#v3=15.0
#v4=30.0
#v5=45.0
nu=1.52E-05
outFile=bpy.path.abspath("//Fig_ReSpan_fast.png")
Re=[]
Re.append(np.multiply(ch,v/nu))
Re.append(np.multiply(ch,v2/nu))
#Re.append(np.multiply(ch,v3/nu))
#Re.append(np.multiply(ch,v4/nu))
#Re.append(np.multiply(ch,v5/nu))
numpy_array = np.array(Re)
transpose = numpy_array.T
#legend=[str(v)+' m/s', str(v2), str(v3),str(v4),str(v5)]
legend=[]
#n=int(len(Re)/2)+1
n=int(transpose.shape[0]/2)+1
#import ipdb
#ipdb.set_trace()
#ipdb.set_trace(context=5)
#wingLib.plotArray(x[0:n],Re[0:n],'Re(span)',outFile)
#wingLib.plotArray(x,Re,'Re(span)',outFile)
wingLib.plotArray(x[0:n],transpose[0:n,:],'Re(span)', legend, outFile)
import ipdb
ipdb.set_trace()
ipdb.set_trace(context=5)
#=== leading edge shift definition
LeShiftL=[]
LeShiftL.append(wingLib.LeShift('elliptic',0.04, 0.5, 1.0,foilwidth/2.0))
ysh=wingLib.applyLeShifts(x,y, LeShiftL)
#placeSections(x,ysh,ch)
sectionNames=wingLib.placeSectionsMinLimited(x,ysh,ch,0.001,func4coords,quality)
if 1:
wingLib.bridgeListOfEdgeLoopsCloseOuterWithFace(sectionNames,'myWing')
#shift to origin
bpy.context.object.location[1] = -chordlength/2.0
bpy.context.object.location[2] = 0.0
|
py | 7df8cda2a9d1cfde2a273d9e31bb0bb5810da95a | import glob
import numpy as np
import os
from urllib.request import urlretrieve
import pkg_resources
import pandas as pd
from multiprocessing import Pool
from functools import partial
from itertools import repeat
import pickle
def get_matrix(self,subject):
"""
Child of dataset.load_matrices
----------
Parameters
----------
subject: either a single subject, or ** for all subjects
"""
if self.parcels == 'schaefer': n_parcels = 400
if self.parcels == 'gordon': n_parcels = 333
if self.source == 'pnc':
if self.matrix_type == 'rest':
if self.parcels == 'gordon':
matrix_path = '/{0}/neuroimaging/rest/restNetwork_gordon/GordonPNCNetworks/{1}_GordonPNC_network.txt'.format(self.data_path,subject)
if self.parcels == 'schaefer':
                matrix_path = '/{0}/neuroimaging/rest/restNetwork_schaefer400/Schaefer400Networks/{1}_Schaefer400_network.txt'.format(self.data_path,subject)
if self.source == 'hcp':
matrix_path = '/{0}/matrices/{1}_{2}.npy'.format(self.data_path,subject,self.matrix_type)
if self.source == 'pnc':
try:
m = np.loadtxt(matrix_path)
except:
m = np.zeros((n_parcels,n_parcels))
m[:,:] = np.nan
if self.source == 'hcp':
try:
m = np.load(matrix_path)
except:
m = np.zeros((n_parcels,n_parcels))
m[:,:] = np.nan
np.fill_diagonal(m,np.nan) #idiot proof
return m
def load_dataset(name):
return pickle.load(open("{0}".format(name), "rb"))
class dataset:
"""
This is the main object to use to load a dataset
"""
def __init__(self, source='pnc',cores=1):
self.source = source
self.cores = cores
if self.source == 'pnc':
self.data_path = '/project/deid_bblrepo1/n1601_dataFreeze/'
self.subject_column = 'scanid'
self.measures = pd.read_csv('{0}/demographics/n1601_demographics_go1_20161212.csv'.format(self.data_path))
self.subject_column = {'scanid':'subject'}
self.measures = self.measures.rename(columns=self.subject_column)
clinical = pd.read_csv('{0}/clinical/n1601_goassess_itemwise_bifactor_scores_20161219.csv'.format(self.data_path)).rename(columns=self.subject_column)
self.measures = self.measures.merge(clinical,how='outer',on='subject')
clinical_dict = pd.read_csv('{0}/clinical/goassess_clinical_factor_scores_dictionary.txt'.format(self.data_path),sep='\t')[24:29].drop(columns=['variablePossibleValues','source', 'notes'])
self.data_dict = {}
for k,i in zip(clinical_dict.variableName.values,clinical_dict.variableDefinition.values):
self.data_dict[k.strip(' ')] = i.strip(' ')
cognitive = pd.read_csv('{0}/cnb/n1601_cnb_factor_scores_tymoore_20151006.csv'.format(self.data_path)).rename(columns=self.subject_column)
self.measures = self.measures.merge(cognitive,how='outer',on='subject')
cognitive_dict = pd.read_csv('{0}/cnb/cnb_factor_scores_dictionary.txt'.format(self.data_path),sep='\t').drop(columns=['source'])
for k,i in zip(cognitive_dict.variableName.values,cognitive_dict.variableDefinition.values):
self.data_dict[k.strip(' ')] = i.strip(' ')
cog_factors = pd.read_csv('{0}/cnb/cog_factors.csv'.format(self.data_path)).rename(columns=self.subject_column)
self.measures = self.measures.merge(cog_factors,how='outer',on='subject')
if self.source == 'hcp':
self.data_path = '/home/mb3152/hcp/'
self.subject_column = 'Subject'
self.measures = pd.read_csv('{0}/unrestricted_mb3152_2_25_2021_8_59_45.csv'.format(self.data_path))
self.subject_column = {'Subject':'subject'}
self.measures = self.measures.rename(columns=self.subject_column)
def update_subjects(self,subjects):
self.measures = self.measures[self.measures.subject.isin(subjects)]
def methods(self):
resource_package = 'pennlinckit'
resource_path = '{0}_boiler.txt'.format(self.source)
path = pkg_resources.resource_stream(resource_package, resource_path)
f = open(path.name, 'r').read()
print (f)
def asl(self):
self.asl = 0
def imaging_qc(self):
if self.source == 'pnc':
if self.matrix_type == 'rest':
qc = pd.read_csv('{0}/neuroimaging/rest/n1601_RestQAData_20170714.csv'.format(self.data_path)).rename(columns=self.subject_column)
qa_dict = pd.read_csv('{0}/neuroimaging/rest/restQADataDictionary_20161010.csv'.format(self.data_path))
for k,i in zip(qa_dict.columnName.values,qa_dict.columnDescription.values):
self.data_dict[k] = i
if self.matrix_type == 'diffusion_pr':
                raise NotImplementedError("imaging QC for 'diffusion_pr' is not implemented yet")
return qc
def load_matrices(self, matrix_type, parcels='schaefer'):
"""
get a matrix from this dataset
----------
parameters
----------
matrix_type: what type of matrix do you want? can be a task, resting-state, diffusion
parcels: schaefer or gordon
----------
returns
----------
out : mean numpy matrix, fisher-z transformed before meaning, np.nan down the diagonal
----------
pnc examples
----------
dataset.get_matrix('nback')
dataset.get_matrix('nback',parcels='gordon')
dataset.get_matrix('diffusion_pr')
"""
self.matrix_type = matrix_type
self.parcels = parcels
qc = self.imaging_qc()
self.measures = self.measures.merge(qc,how='inner',on='subject')
self.matrix = []
for subject in self.measures.subject:
self.matrix.append(get_matrix(self,subject))
self.matrix = np.array(self.matrix)
def filter(self,way,value=None,column=None):
if way == '==':
self.matrix = self.matrix[self.measures[column]==value]
self.measures = self.measures[self.measures[column]==value]
if way == '!=':
self.matrix = self.matrix[self.measures[column]!=value]
self.measures = self.measures[self.measures[column]!=value]
if way == 'np.nan':
self.matrix = self.matrix[np.isnan(self.measures[column])==False]
self.measures = self.measures[np.isnan(self.measures[column])==False]
if way == '>':
self.matrix = self.matrix[self.measures[column]>value]
self.measures = self.measures[self.measures[column]>value]
if way == '<':
self.matrix = self.matrix[self.measures[column]<value]
self.measures = self.measures[self.measures[column]<value]
if way == 'matrix':
mask = np.isnan(self.matrix).sum(axis=1).sum(axis=1) == self.matrix.shape[-1]
self.measures = self.measures[mask]
self.matrix = self.matrix[mask]
if way == 'cognitive':
factors = ['F1_Exec_Comp_Res_Accuracy_RESIDUALIZED','F2_Social_Cog_Accuracy_RESIDUALIZED','F3_Memory_Accuracy_RESIDUALIZED']
mask = np.isnan(self.measures[factors]).sum(axis=1) == 0
self.measures = self.measures[mask]
self.matrix = self.matrix[mask]
    def save(self,name):
        with open("{0}".format(name), "wb") as f:
            pickle.dump(self, f) # pickle this dataset object to the given file path
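# Example usage (a sketch only; assumes the PNC paths above are accessible and uses a
# placeholder column name for the demographic filter):
#     data = dataset(source='pnc')
#     data.load_matrices('rest', parcels='schaefer')
#     data.filter('matrix')                                     # keep subjects whose matrices loaded
#     data.filter('>', value=0, column='some_numeric_column')   # column name is hypothetical
#     data.save('pnc_rest_dataset.pkl')
#     data = load_dataset('pnc_rest_dataset.pkl')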
class allen_brain_institute:
def __init__(self):
"""
Allen Gene Expression data in the Scheafer 400 parcels.
----------
Returns
----------
out : left hemisphere expression, right hemisphere expression, names of the genes
"""
self.data_home = os.path.expanduser('~/allen/')
self.data_home = os.path.join(self.data_home)
if os.path.exists(self.data_home) == False:
print ("Gemme a sec, I am downloading allen gene expression to: %s" %(self.data_home))
os.makedirs(self.data_home)
urlretrieve('https://www.dropbox.com/s/1zahnd0k0jpk0xf/allen_expression_genes.npy?dl=1',self.data_home + 'allen_expression_genes.npy')
urlretrieve('https://www.dropbox.com/s/879cuel80nntipq/allen_expression_lh.npy?dl=1',self.data_home + 'allen_expression_lh.npy')
urlretrieve('https://www.dropbox.com/s/cnb6aacerdhhd4p/allen_expression_rh.npy?dl=1',self.data_home + 'allen_expression_rh.npy')
names = np.load(self.data_home + 'allen_expression_genes.npy',allow_pickle=True)
final_names = []
for n in names:final_names.append(n[0][0])
np.save(self.data_home + 'allen_expression_genes.npy',final_names)
print ("Okay, done, I won't have to do this again!")
self.names = np.load(self.data_home + 'allen_expression_genes.npy')
self.expression= np.zeros((400,len(self.names)))
self.expression[:200] = np.load(self.data_home + 'allen_expression_lh.npy')[:200]
self.expression[200:] = np.load(self.data_home + 'allen_expression_rh.npy')[200:]
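# Example (sketch): expression data are parcel-by-gene for the Schaefer 400 parcellation:
#     abi = allen_brain_institute()
#     abi.expression.shape   # (400, len(abi.names))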
class evo_expansion:
def __init__(self):
resource_package = 'pennlinckit'
resource_path = 'schaefer400x17_rescaled_regional_evoExpansion_LHfixed.npy'
path = pkg_resources.resource_stream(resource_package, resource_path)
self.data = np.load(path.name)
class gradient:
def __init__(self):
resource_package = 'pennlinckit'
resource_path = 'schaefer400x17_mean_regional_margulies_gradient.txt'
path = pkg_resources.resource_stream(resource_package, resource_path)
self.data = np.loadtxt(path.name)
|
py | 7df8cde210f5a4d97e840a2553e8567d13760ac0 | # -*- coding: utf-8 -*-
"""Box layout vertical."""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk
# @Gtk.Template(string, filename, resource_path)
@Gtk.Template(filename='box_vertical.glade')
class MainWindow(Gtk.ApplicationWindow):
__gtype_name__ = 'MainWindow'
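# Note (assumption): 'box_vertical.glade' is not shown here; Gtk.Template expects it to
# define a template object whose class name matches __gtype_name__ ('MainWindow').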
if __name__ == '__main__':
win = MainWindow()
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
|
py | 7df8ce95d0d33b44386effbe4960d10bb3b59320 | #! Python 3.4
import logging
import requests
from multiprocessing import Queue # cx_freeze is dumb
from datetime import datetime
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename
# Logging to file so we can track down failed distributions
logging.basicConfig(filename="log.txt", level=logging.INFO, filemode='w', format='%(message)s')
logging.info("PGrant log file") # Logging everything into info level because formatting is disabled anyway
logging.info(datetime.now())
# Defining core components
GRANT_ITEM_URL = 'https://api.steampowered.com/ITFPromos_440/GrantItem/v0001/'
def grant_medals(idarray, promo_id, api_key):
steam_ids = idarray
print("Granting {} to:".format(promo_id))
logging.info("Granting {} to:".format(promo_id))
for steam_id in steam_ids:
print(" {} ".format(steam_id), end='')
logging.info(" {} ".format(steam_id))
grant_item(steam_id, promo_id, api_key)
Reset()
logging.info("--------------------------")
print("--------------------------")
logging.info("Finished file processing")
print("Finished file processing")
def grant_item(steam_id, promo_id, api_key):
data = {
'SteamID': steam_id,
'PromoID': promo_id,
'Key': api_key,
}
response = requests.post(GRANT_ITEM_URL, data = data)
try:
json = response.json()
except ValueError:
print('[FAIL: request error/not authorized]')
logging.info('[FAIL: request error/not authorized]')
return
if 'result' not in json:
print('[FAIL: response error/invalid promo id]')
logging.info('[FAIL: response error/invalid promo id]')
return
result = json['result']
if int(result['status']) != 1:
print("[FAIL: {}]".format(result['statusDetail']))
logging.info("[FAIL: {}]".format(result['statusDetail']))
else:
print("[SUCCESS!]")
logging.info("[SUCCESS!]")
# Defining GUI components
root = Tk()
fileopened = False
def OpenFile():
name = askopenfilename(filetypes =(("Text File", "*.txt"),("All Files","*.*")),
title = "Choose a list of SteamIDs"
)
try:
with open(name,'r') as UseFile:
global steamids
steamids = UseFile.read().splitlines()
print(name)
print("Found " + str(len(steamids)) + " potential SteamIDs")
logging.info("--------------------------")
logging.info("Loaded file: " + name)
logging.info("Found " + str(len(steamids)) + " potential SteamIDs")
global fileopened
fileopened = True
BtnCheck(fileopened)
global filelabel
filelabel['text'] = name
filelabel['foreground'] = "black"
except:
print("Invalid file")
def About():
window = Toplevel(root)
window.title( "About PGrant")
window.geometry( "400x250")
version = Label(window, text ="v. 0.4", foreground="grey", font=(4))
version.pack(padx=6, pady = 2, side=TOP)
aboutt = Label(window, text ="This tool is designed to distribute promotional TF2 items, parsing multiple SteamID64s from a file, one per line. Be sure to keep your console output visible so you can keep track of the progress.", wraplength=180, font=(6))
aboutt.pack(padx=6, pady = 2, side=LEFT)
aboutp = Label(window, text ="The software is licensed under the Apache License 2.0. A major part of the original code was generously provided by Benjamin Schaaf.", wraplength=180, font=(6))
aboutp.pack(padx=6, pady = 6, side=LEFT)
def BtnCheck(event):
    if (len(apitoken.get()) != 0) and (len(promoid.get()) != 0) and fileopened:
global runbutton
runbutton['state'] = 'normal'
runbutton['text'] = 'Grant'
else:
runbutton['state'] = 'disabled'
runbutton['text'] = 'Grant'
def Reset():
global fileopened
fileopened = False
BtnCheck(fileopened)
global apitoken
apitoken.delete(0, END)
global promoid
promoid.delete(0, END)
global filelabel
filelabel['text'] = "No file loaded."
filelabel['foreground'] = "grey"
# Window properties
root.title( "PGrant 0.4")
root.geometry( "350x400")
# Packing GUI elements
apilabel = Label(root, text ="Steam API key:", font=(14))
apilabel.pack(padx=6, pady = 2)
apitoken = Entry(root, width = 37)
apitoken.bind("<KeyRelease>", BtnCheck)
apitoken.pack(padx=6, pady = 2)
apitoken.focus_set()
promolabel = Label(root, text ="Promo ID:",font=(14))
promolabel.pack(padx=6, pady = 2)
promoid = Entry(root, width = 37)
promoid.bind("<KeyRelease>", BtnCheck)
promoid.pack(padx=6, pady = 2)
runbutton = Button(root, text="Grant", width=14, state=DISABLED, command=lambda: grant_medals(steamids, promoid.get(), apitoken.get()))
runbutton.pack(padx=6, pady = 10)
filelabel = Label(root, text ="No file loaded.", wraplength=300, foreground="grey",font=(12))
filelabel.pack(side = BOTTOM, padx=6, pady = 14)
# Menu Bar
menu = Menu(root)
root.config(menu=menu)
menu.add_command(label = 'Load a file', command = OpenFile)
menu.add_command(label = 'About', command = About)
root.mainloop() |
py | 7df8cefaf98a6352ad80143d60ca2fb3deb072bd | # Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PySide2 import QtWidgets
import logging
log = logging.getLogger(__name__)
def comboBoxConfig(comboBox, values, default=None):
"""Configure (or reconfigure) a QT combo box.
:param comboBox: The combo box to configure.
:param values: The list of value strings for the combo box.
:param default: The default value for the combo box which must also be
in values. If None, then attempt to keep the current value of the
combo box if at all possible. If not, then just select the first
item.
:return: The new text value for the combobox.
"""
if default is not None and default not in values:
log.warning('Ignoring default value "%s" since it is not in values: %s' % (default, values))
default = None
if default is None and comboBox.count():
# attempt to keep the previous value
currentValue = str(comboBox.currentText())
if currentValue in values:
default = currentValue
d1 = set(values)
d0 = set([str(comboBox.itemText(idx)) for idx in range(comboBox.count())])
deleted = d0 - d1
added = [x for x in values if x not in d0] # keep ordered
for value in deleted:
idx = comboBox.findText(value)
comboBox.removeItem(idx)
for value in added:
comboBox.addItem(value)
if default:
default = str(default)
idx = comboBox.findText(default)
if idx < 0:
log.warning('Could not find default entry')
elif str(comboBox.currentText()) != default:
comboBox.setCurrentIndex(idx)
return str(comboBox.currentText())
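# Example (sketch): reconfigure a hypothetical sample-rate combo box while preserving the
# user's current selection when possible:
#     rates = ['1 MHz', '500 kHz', '100 kHz']
#     comboBoxConfig(sampleRateComboBox, rates, default='1 MHz')  # sampleRateComboBox is hypothetical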
def comboBoxDecrement(box):
idx = box.currentIndex()
if idx > 0:
box.setCurrentIndex(idx - 1)
return True
return False
def comboBoxIncrement(box):
idx = box.currentIndex()
idx += 1
if idx < box.count():
box.setCurrentIndex(idx)
return True
return False
def confirmDiscard(parent):
    msgBox = QtWidgets.QMessageBox(parent)
    msgBox.setText('Existing data has not been saved.')
    msgBox.setInformativeText('Discard data?')
    msgBox.setStandardButtons(QtWidgets.QMessageBox.Discard | QtWidgets.QMessageBox.Cancel)
    msgBox.setDefaultButton(QtWidgets.QMessageBox.Cancel)
    rv = msgBox.exec_()
    if rv != QtWidgets.QMessageBox.Discard:
return False
return True
def confirmOverwrite(parent, targetName=None):
targetName = 'file' if targetName is None else targetName
    msgBox = QtWidgets.QMessageBox(parent)
    msgBox.setText('%s already exists.' % targetName.title())
    msgBox.setInformativeText('Overwrite %s?' % targetName)
    msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
    msgBox.setDefaultButton(QtWidgets.QMessageBox.Cancel)
    rv = msgBox.exec_()
    if rv != QtWidgets.QMessageBox.Ok:
return False
return True
def clear_layout(layout):
"""Clear and delete all widgets from a layout.
:param layout: The QT layout.
"""
# https://stackoverflow.com/questions/9374063/remove-widgets-and-layout-as-well
if layout is not None:
while layout.count():
item = layout.takeAt(0)
widget = item.widget()
if widget is not None:
widget.deleteLater()
else:
clear_layout(item.layout())
|
py | 7df8cfc9e31937b7f6d9394b752cb945e54dc212 | from bs4 import BeautifulSoup
import requests
response = requests.get("https://news.ycombinator.com/news")
yc_web_page = response.text
soup = BeautifulSoup(yc_web_page, "html.parser")
articles = soup.find_all(name="a", class_="storylink")
article_texts = []
article_links = []
for article_tag in articles:
text = article_tag.getText()
article_texts.append(text)
link = article_tag.get("href")
article_links.append(link)
article_upvotes = [int(score.getText().split()[0]) for score in soup.find_all(name="span", class_="score")]
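# each score span's text is presumably of the form "123 points"; split()[0] grabs the number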
# print(article_texts)
# print(article_links)
# print(article_upvotes)
largest_number = max(article_upvotes)
largest_index = article_upvotes.index(largest_number)
print(article_texts[largest_index])
print(article_links[largest_index])
print(article_upvotes[largest_index])
# with open('website.html', 'r') as file:
# contents = file.read()
#
# soup = BeautifulSoup(contents, 'html.parser')
#
# # print(soup.title)
# # print(soup.title.name)
# # print(soup.title.string)
# # print(soup.prettify())
# # print(soup)
# # print(soup.li.string)
#
# # print(soup.find_all(name="li"))
#
# # all_anchor_tags = soup.find_all(name="a")
# #
# # for tag in all_anchor_tags:
# # # print(tag.getText())
# # print(tag.get("href"))
#
# # heading = soup.find(name="h1", id="name")
# # print(heading.getText())
#
# # section_heading = soup.find(name="h3", class_="heading")
# # print(section_heading.getText())
# # print(section_heading.get("class"))
#
# company_url = soup.select_one(selector="p a")
# print(company_url)
#
# h3_heading = soup.find_all("h3", class_="heading")
# print(h3_heading)
#
# name = soup.select_one("#name")
# print(name)
#
# headings = soup.select(".heading")
# print(headings)
|
py | 7df8d1080269445a4e93fc0dbfd94e1d85d90395 | """
Django settings for ganzige project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j!o@k8hjlc0-(fapg8zomi%++8s^5^wrfbf$-3rh3t5i8ymj(5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'webhook',
'blog',
'home',
'photo',
'data',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ganzige.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ganzige.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ganzige',
'USER': 'root',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
CKEDITOR_UPLOAD_PATH = 'upload'
|
py | 7df8d17585ac0fbae7e0d76ad82219d8f108ad89 | import os
import unittest
import numpy as np
import torch
from pytorch_common import utils
from pytorch_common.additional_configs import BaseModelConfig
from pytorch_common.models import create_model
from pytorch_common.types import Callable, Dict, List, Optional, Tuple, Union, _Batch
class TestUtils(unittest.TestCase):
def test_file_handling(self):
"""
Test saving/loading of different
files (pickle, dill, yaml).
"""
def _test_module_file_handling(
data: Union[np.ndarray, Dict],
primary_path: str,
file_name: Optional[str] = None,
module: Optional[str] = "pickle",
) -> None:
"""
Test saving/loading of a
file with a given `module`.
"""
# Save the file
utils.save_object(data, primary_path, file_name, module=module)
# Load saved file
loaded_data = utils.load_object(primary_path, file_name, module=module)
# Ensure results match
result = data == loaded_data
if not isinstance(result, bool):
result = result.all()
self.assertTrue(result)
# Delete saved file
if file_name is None:
utils.remove_object(primary_path)
self.assertFalse(os.path.isfile(primary_path))
else:
utils.remove_object(primary_path, file_name)
self.assertFalse(os.path.isfile(utils.get_file_path(primary_path, file_name)))
# Initialize dummy directory and data
primary_path = "dummy_dir"
dummy_np_data = np.random.randn(10, 10)
dummy_yaml_data = {"x": 1, "y": 2, "z": 3}
# Test file handling for all file types
for data, file_name, module in zip(
[dummy_np_data, dummy_np_data, dummy_yaml_data],
["dummy_data.pkl", "dummy_data.pkl", "dummy_data.yaml"],
["pickle", "dill", "yaml"],
):
# Test directly with `file_name`
_test_module_file_handling(data, file_name, module=module)
# Test with `file_name` inside `primary_path`
utils.make_dirs(primary_path)
_test_module_file_handling(data, primary_path, file_name=file_name, module=module)
# Delete created directories
utils.remove_dir(primary_path)
self.assertFalse(os.path.isdir(primary_path))
def test_get_string_from_dict(self):
"""
Test correct generation of string
from config dictionary.
"""
# Test output for empty inputs
self.assertEqual(utils.get_string_from_dict(), "")
self.assertEqual(utils.get_string_from_dict({}), "")
# Test output
dictionary = {"size": 100, "lr": 1e-3}
self.assertEqual(utils.get_string_from_dict(dictionary), "lr_0.001-size_100")
# Test same output regardless of order
dictionary = {"lr": 1e-3, "size": 100}
self.assertEqual(utils.get_string_from_dict(dictionary), "lr_0.001-size_100")
    def test_get_unique_config_name(self):
"""
Test correct generation of unique
string from config dictionary.
"""
primary_name = "dummy"
# Test unique string
dictionary = {"size": 100, "lr": 1e-3}
unique_str1 = utils.get_unique_config_name(primary_name, dictionary)
self.assertTrue(unique_str1.startswith("dummy-"))
# Test same string regardless of order
dictionary = {"lr": 1e-3, "size": 100}
unique_str2 = utils.get_unique_config_name(primary_name, dictionary)
self.assertEqual(unique_str1, unique_str2)
def test_send_model_to_device(self):
"""
Test sending of model to different devices.
"""
# Create model
config = BaseModelConfig({"in_dim": 1, "num_classes": 1})
model = create_model("single_layer_classifier", config)
# Test sending model to CPU
model_cpu = utils.send_model_to_device(model.copy(), "cpu")
self.assertFalse(utils.is_model_on_gpu(model_cpu))
self.assertFalse(utils.is_model_on_gpu(model))
self.assertFalse(model_cpu.is_cuda)
self.assertFalse(model.is_cuda)
if torch.cuda.is_available():
# Test sending model to GPU
model_cuda = utils.send_model_to_device(model_cpu.copy(), "cuda")
self.assertTrue(utils.is_model_on_gpu(model_cuda))
self.assertTrue(model_cuda.is_cuda)
# Ensure original models unchanged
self.assertFalse(utils.is_model_on_gpu(model_cpu))
self.assertFalse(utils.is_model_on_gpu(model))
self.assertFalse(model_cpu.is_cuda)
self.assertFalse(model.is_cuda)
n_gpu = torch.cuda.device_count()
if n_gpu > 1:
# Test sending model to multiple GPUs
model_parallel = utils.send_model_to_device(model_cuda.copy(), "cuda", range(n_gpu))
self.assertTrue(utils.is_model_on_gpu(model_parallel))
self.assertTrue(utils.is_model_parallelized(model_parallel))
self.assertTrue(model_parallel.is_cuda)
self.assertTrue(model_parallel.is_parallelized)
# Ensure original single-GPU model unchanged
self.assertTrue(utils.is_model_on_gpu(model_cuda))
self.assertFalse(utils.is_model_parallelized(model_cuda))
self.assertTrue(model_cuda.is_cuda)
self.assertFalse(model_cuda.is_parallelized)
# Ensure original model unchanged
self.assertFalse(utils.is_model_on_gpu(model))
self.assertFalse(utils.is_model_parallelized(model))
self.assertFalse(model.is_cuda)
self.assertFalse(model.is_parallelized)
# Test sending of multi-GPU model to CPU
model_cpu = utils.send_model_to_device(model_parallel, "cpu")
self.assertFalse(utils.is_model_on_gpu(model_cpu))
self.assertFalse(utils.is_model_parallelized(model_cpu))
self.assertFalse(model_cpu.is_cuda)
self.assertFalse(model_cpu.is_parallelized)
# Test sending of single-GPU model to CPU
model_cpu = utils.send_model_to_device(model_cuda, "cpu")
self.assertFalse(utils.is_model_on_gpu(model_cpu))
self.assertFalse(utils.is_model_parallelized(model_cpu))
self.assertFalse(model_cpu.is_cuda)
self.assertFalse(model_cpu.is_parallelized)
def test_send_batch_to_device(self):
"""
Test sending of batch to different devices.
"""
# Define batch
a, b, c = [1, 2, 3], [4, 5, 6], [7, 8, 9]
batch = self._get_batch(a, b, c, batch_type=torch.tensor, device="cpu")
if torch.cuda.is_available():
# Test sending batch to GPU
batch_cuda = utils.send_batch_to_device(batch, "cuda")
self.assertTrue(utils.compare_tensors_or_arrays(batch_cuda, batch))
self.assertFalse(utils.is_batch_on_gpu(batch))
self.assertTrue(utils.is_batch_on_gpu(batch_cuda))
# Test sending batch to CPU
batch_cpu = utils.send_batch_to_device(batch, "cpu")
self.assertTrue(utils.compare_tensors_or_arrays(batch_cpu, batch))
self.assertFalse(utils.is_batch_on_gpu(batch_cpu))
self.assertTrue(utils.is_batch_on_gpu(batch_cuda))
def test_convert_tensor_to_numpy(self):
"""
Test converting a batch of torch
tensor(s) to numpy array(s).
"""
# Define numpy and torch batches
a, b, c = [1.5, 2, 3], [4, -5, 6], [7, 0, 9]
batch_np = self._get_batch(a, b, c, batch_type=np.array)
batch_torch = self._get_batch(a, b, c, batch_type=torch.tensor, device="cpu")
# Compare contents of both batches
self.assertTrue(utils.compare_tensors_or_arrays(batch_np, utils.convert_tensor_to_numpy(batch_torch)))
if torch.cuda.is_available():
# Compare contents of both batches when tensor is on GPU
batch_torch_cuda = utils.send_batch_to_device(batch_torch, "cuda")
self.assertTrue(utils.compare_tensors_or_arrays(batch_np, utils.convert_tensor_to_numpy(batch_torch_cuda)))
def test_convert_numpy_to_tensor(self):
"""
Test converting a batch of numpy
array(s) to torch tensor(s).
"""
# Define numpy and torch batches
a, b, c = [1.5, 2, 3], [4, -5, 6], [7, 0, 9]
batch_np = self._get_batch(a, b, c, batch_type=np.array)
batch_torch = self._get_batch(a, b, c, batch_type=torch.tensor, device="cpu")
# Compare contents of both batches
self.assertTrue(utils.compare_tensors_or_arrays(batch_torch, utils.convert_numpy_to_tensor(batch_np)))
if torch.cuda.is_available():
# Compare contents of both batches when tensor is on GPU
batch_torch_cuda = utils.convert_numpy_to_tensor(batch_np, "cuda")
self.assertTrue(utils.compare_tensors_or_arrays(batch_torch, batch_torch_cuda))
self.assertTrue(utils.is_batch_on_gpu(batch_torch_cuda))
def _get_batch(
self,
a: List[float],
b: List[float],
c: List[float],
batch_type: Callable[[List[float]], Union[np.ndarray, torch.Tensor]],
**kwargs,
) -> _Batch:
"""
Construct a numpy/torch batch of shape
which forces recursion in type conversion.
"""
a_ = batch_type(a, **kwargs)
b_ = batch_type(b, **kwargs)
c_ = batch_type(c, **kwargs)
return ((a_, b_), c_)
if __name__ == "__main__":
unittest.main()
|
py | 7df8d1822280df0178bc00428ced69506f45bbd5 | import json
import logging
import os
import re
import sys
import unicodedata
from datetime import datetime
from enum import Enum, auto
from time import sleep
from typing import Optional, Tuple
import emoji
import yaml
from colorama import Fore, Style
from langdetect import detect
from AllInOneInstagramBot.core.device_facade import Timeout
from AllInOneInstagramBot.core.resources import ResourceID as resources
from AllInOneInstagramBot.core.utils import random_sleep
from AllInOneInstagramBot.core.views import FollowStatus, ProfileView
logger = logging.getLogger(__name__)
FIELD_SKIP_BUSINESS = "skip_business"
FIELD_SKIP_NON_BUSINESS = "skip_non_business"
FIELD_SKIP_FOLLOWING = "skip_following"
FIELD_SKIP_FOLLOWER = "skip_follower"
FIELD_SKIP_IF_LINK_IN_BIO = "skip_if_link_in_bio"
FIELD_SKIP_PRIVATE = "skip_if_private"
FIELD_SKIP_PUBLIC = "skip_if_public"
FIELD_MIN_FOLLOWERS = "min_followers"
FIELD_MAX_FOLLOWERS = "max_followers"
FIELD_MIN_FOLLOWINGS = "min_followings"
FIELD_MAX_FOLLOWINGS = "max_followings"
FIELD_MIN_POTENCY_RATIO = "min_potency_ratio"
FIELD_MAX_POTENCY_RATIO = "max_potency_ratio"
FIELD_FOLLOW_PRIVATE_OR_EMPTY = "follow_private_or_empty"
FIELD_PM_TO_PRIVATE_OR_EMPTY = "pm_to_private_or_empty"
FIELD_COMMENT_PHOTOS = "comment_photos"
FIELD_COMMENT_VIDEOS = "comment_videos"
FIELD_COMMENT_CAROUSELS = "comment_carousels"
FIELD_BLACKLIST_WORDS = "blacklist_words"
FIELD_MANDATORY_WORDS = "mandatory_words"
FIELD_SPECIFIC_ALPHABET = "specific_alphabet"
FIELD_BIO_LANGUAGE = "biography_language"
FIELD_BIO_BANNED_LANGUAGE = "biography_banned_language"
FIELD_MIN_POSTS = "min_posts"
FIELD_MIN_LIKERS = "min_likers"
FIELD_MAX_LIKERS = "max_likers"
FIELD_MUTUAL_FRIENDS = "mutual_friends"
IGNORE_CHARSETS = ["MATHEMATICAL"]
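# Illustrative filters.yml entries that these field names map to (values are placeholders only):
#   skip_following: true
#   skip_follower: false
#   min_followers: 100
#   max_followers: 10000
#   min_potency_ratio: 0.3
#   biography_language: [en]
#   mandatory_words: [photography, travel]
#   specific_alphabet: [LATIN]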
def load_config(config):
global args
global configs
global ResourceID
args = config.args
configs = config
ResourceID = resources(config.args.app_id)
class SkipReason(Enum):
YOU_FOLLOW = auto()
FOLLOW_YOU = auto()
IS_PRIVATE = auto()
IS_PUBLIC = auto()
UNKNOWN_PRIVACY = auto()
LT_FOLLOWERS = auto()
GT_FOLLOWERS = auto()
LT_FOLLOWINGS = auto()
GT_FOLLOWINGS = auto()
POTENCY_RATIO = auto()
HAS_BUSINESS = auto()
HAS_NON_BUSINESS = auto()
NOT_ENOUGH_POSTS = auto()
BLACKLISTED_WORD = auto()
MISSING_MANDATORY_WORDS = auto()
ALPHABET_NOT_MATCH = auto()
ALPHABET_NAME_NOT_MATCH = auto()
BIOGRAPHY_LANGUAGE_NOT_MATCH = auto()
NOT_LOADED = auto()
RESTRICTED = auto()
HAS_LINK_IN_BIO = auto()
LT_MUTUAL = auto()
BIOGRAPHY_IS_EMPTY = auto()
class Profile(object):
def __init__(
self,
mutual_friends,
follow_button_text,
is_restricted,
is_private,
has_business_category,
posts_count,
biography,
link_in_bio,
fullname,
):
self.datetime = str(datetime.now())
self.followers = 0
self.followings = 0
self.mutual_friends = mutual_friends
self.follow_button_text = follow_button_text
self.is_restricted = is_restricted
self.is_private = is_private
self.has_business_category = has_business_category
self.posts_count = posts_count
self.biography = biography
self.link_in_bio = link_in_bio
self.fullname = fullname
def set_followers_and_following(
self, followers: Optional[int], followings: Optional[int]
) -> None:
self.followers = followers
self.followings = followings
if followers is not None or followings is not None:
self.potency_ratio = (
0 if self.followings == 0 else self.followers / self.followings
)
else:
self.potency_ratio = None
class Filter:
conditions = None
def __init__(self, storage=None):
filter_path = storage.filter_path
if configs.args.disable_filters:
logger.warning("Filters are disabled!")
elif os.path.exists(filter_path) and filter_path.endswith(".yml"):
with open(filter_path, "r", encoding="utf-8") as stream:
try:
self.conditions = yaml.safe_load(stream)
except Exception as e:
logger.error(f"Error: {e}")
elif os.path.exists(filter_path):
with open(filter_path, "r", encoding="utf-8") as json_file:
try:
self.conditions = json.load(json_file)
logger.warning(
"Using filter.json is deprecated from version 2.3.0 and will stop working very soon, use filters.yml instead!"
)
sleep(5)
except Exception as e:
logger.error(
f"Please check {json_file.name}, it contains this error: {e}"
)
sys.exit(2)
self.storage = storage
if self.conditions is not None:
logger.info("-" * 70, extra={"color": f"{Fore.YELLOW}{Style.BRIGHT}"})
logger.info(
f"{'Filters recap (no spell check!)':<35} Value",
extra={"color": f"{Fore.YELLOW}{Style.BRIGHT}"},
)
logger.info("-" * 70, extra={"color": f"{Fore.YELLOW}{Style.BRIGHT}"})
for k, v in self.conditions.items():
if isinstance(v, bool):
logger.info(
f"{k:<35} {v}",
extra={"color": f"{Fore.GREEN if v else Fore.RED}"},
)
else:
logger.info(f"{k:<35} {v}", extra={"color": f"{Fore.WHITE}"})
else:
logger.warning(
"The filters file doesn't exists in your account folder. Download and use it from https://github.com/AllInOneInstagramBot/bot/blob/08e1d7aff39ec47543fa78aadd7a2f034b9ae34d/config-examples/filters.yml and place it in your account folder!"
)
def is_num_likers_in_range(self, likes_on_post: str) -> bool:
if self.conditions is not None and likes_on_post is not None:
if likes_on_post == -1:
logger.debug("We don't know how many likers this post has.")
return True
else:
field_min_likers = self.conditions.get(FIELD_MIN_LIKERS, 1)
field_max_likers = self.conditions.get(FIELD_MAX_LIKERS, 1000000)
if likes_on_post in range(field_min_likers, field_max_likers):
logger.info(
f"Post has likes in range: {field_min_likers}-{field_max_likers}."
)
return True
else:
logger.info(
f"Post has not likes in range: {field_min_likers}-{field_max_likers}."
)
return False
else:
logger.debug("filters.yml not loaded!")
return True
def return_check_profile(self, username, profile_data, skip_reason=None) -> bool:
if self.storage is not None:
self.storage.add_filter_user(username, profile_data, skip_reason)
return skip_reason is not None
def check_profile(self, device, username):
"""
This method assumes being on someone's profile already.
"""
if self.conditions is not None:
field_skip_business = self.conditions.get(FIELD_SKIP_BUSINESS, False)
field_skip_non_business = self.conditions.get(
FIELD_SKIP_NON_BUSINESS, False
)
field_skip_following = self.conditions.get(FIELD_SKIP_FOLLOWING, False)
field_skip_follower = self.conditions.get(FIELD_SKIP_FOLLOWER, False)
field_min_followers = self.conditions.get(FIELD_MIN_FOLLOWERS)
field_max_followers = self.conditions.get(FIELD_MAX_FOLLOWERS)
field_min_followings = self.conditions.get(FIELD_MIN_FOLLOWINGS)
field_max_followings = self.conditions.get(FIELD_MAX_FOLLOWINGS)
field_min_potency_ratio = self.conditions.get(FIELD_MIN_POTENCY_RATIO, 0)
field_max_potency_ratio = self.conditions.get(FIELD_MAX_POTENCY_RATIO, 999)
field_blacklist_words = self.conditions.get(FIELD_BLACKLIST_WORDS, [])
field_mandatory_words = self.conditions.get(FIELD_MANDATORY_WORDS, [])
field_specific_alphabet = self.conditions.get(FIELD_SPECIFIC_ALPHABET)
field_bio_language = self.conditions.get(FIELD_BIO_LANGUAGE)
field_bio_banned_language = self.conditions.get(FIELD_BIO_BANNED_LANGUAGE)
field_min_posts = self.conditions.get(FIELD_MIN_POSTS)
field_mutual_friends = self.conditions.get(FIELD_MUTUAL_FRIENDS, -1)
field_skip_if_link_in_bio = self.conditions.get(
FIELD_SKIP_IF_LINK_IN_BIO, False
)
field_skip_if_private = self.conditions.get(FIELD_SKIP_PRIVATE, False)
field_skip_if_public = self.conditions.get(FIELD_SKIP_PUBLIC, False)
profile_data = self.get_all_data(device)
if profile_data.is_restricted:
logger.info(
"This is a restricted profile, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.RESTRICTED
)
if profile_data.follow_button_text == FollowStatus.NONE or None in (
profile_data.followers,
profile_data.followings,
profile_data.posts_count,
):
logger.info(
"Profile was not fully loaded, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.NOT_LOADED
)
if self.conditions is None:
logger.debug("filters.yml not loaded!")
return profile_data, False
if (
field_skip_following
and profile_data.follow_button_text == FollowStatus.FOLLOWING
):
logger.info(
f"You follow @{username}, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.YOU_FOLLOW
)
if (
field_skip_follower
and profile_data.follow_button_text == FollowStatus.FOLLOW_BACK
):
logger.info(
f"@{username} follows you, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.FOLLOW_YOU
)
logger.debug(
f"This account is {'private' if profile_data.is_private else 'public'}."
)
            if not profile_data.is_private and field_skip_if_public:
logger.info(
f"@{username} has public account and you want to interract only private, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.IS_PUBLIC
)
elif profile_data.is_private and field_skip_if_private:
logger.info(
f"@{username} has private account and you want to interract only public, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.IS_PRIVATE
)
elif profile_data.is_private is None:
logger.info(
f"Could not determine if @{username} is public or private, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.UNKNOWN_PRIVACY
)
logger.debug("Checking if account is within follower/following parameters...")
if field_min_followers is not None and profile_data.followers < int(
field_min_followers
):
logger.info(
f"@{username} has less than {field_min_followers} followers, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.LT_FOLLOWERS
)
if field_max_followers is not None and profile_data.followers > int(
field_max_followers
):
logger.info(
f"@{username} has more than {field_max_followers} followers, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.GT_FOLLOWERS
)
if field_min_followings is not None and profile_data.followings < int(
field_min_followings
):
logger.info(
f"@{username} has less than {field_min_followings} followings, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.LT_FOLLOWINGS
)
if field_max_followings is not None and profile_data.followings > int(
field_max_followings
):
logger.info(
f"@{username} has more than {field_max_followings} followings, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.GT_FOLLOWINGS
)
if (field_min_potency_ratio != 0 or field_max_potency_ratio != 999) and (
(
int(profile_data.followings) == 0
or profile_data.followers / profile_data.followings
< float(field_min_potency_ratio)
or profile_data.followers / profile_data.followings
> float(field_max_potency_ratio)
)
):
logger.info(
f"@{username}'s potency ratio is not between {field_min_potency_ratio} and {field_max_potency_ratio}, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.POTENCY_RATIO
)
if field_mutual_friends != -1:
logger.debug(
f"Checking if that user has at least {field_mutual_friends} mutual friends."
)
if profile_data.mutual_friends < field_mutual_friends:
logger.info(
f"@{username} has less then {field_mutual_friends} mutual friends, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.LT_MUTUAL
)
if field_skip_if_link_in_bio:
logger.debug("Checking if account has link in bio...")
if profile_data.link_in_bio is not None:
logger.info(
f"@{username} has a link in bio, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.HAS_LINK_IN_BIO
)
if field_skip_business or field_skip_non_business:
logger.debug("Checking if account is a business...")
if field_skip_business and profile_data.has_business_category is True:
logger.info(
f"@{username} has business account, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.HAS_BUSINESS
)
if field_skip_non_business and profile_data.has_business_category is False:
logger.info(
f"@{username} has non business account, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.HAS_NON_BUSINESS
)
if field_min_posts is not None and field_min_posts > profile_data.posts_count:
logger.info(
f"@{username} doesn't have enough posts ({profile_data.posts_count}), skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.NOT_ENOUGH_POSTS
)
cleaned_biography = " ".join(
emoji.get_emoji_regexp()
.sub("", profile_data.biography.replace("\n", ""))
.lower()
.split()
)
if not cleaned_biography and (
len(field_mandatory_words) > 0
or field_bio_language is not None
or field_specific_alphabet is not None
):
logger.info(
f"@{username} has an empty biography, that means there isn't any mandatory things that can be checked. Skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.BIOGRAPHY_IS_EMPTY
)
if (
len(field_blacklist_words) > 0
or len(field_mandatory_words) > 0
or field_specific_alphabet is not None
or field_bio_language is not None
or field_bio_banned_language is not None
):
logger.debug("Pulling biography...")
if len(field_blacklist_words) > 0:
logger.debug(
"Checking if account has blacklisted words in biography..."
)
# If we found a blacklist word return False
for w in field_blacklist_words:
blacklist_words = re.compile(
r"\b({0})\b".format(w), flags=re.IGNORECASE
).search(cleaned_biography)
if blacklist_words is not None:
logger.info(
f"@{username} found a blacklisted word '{w}' in biography, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.BLACKLISTED_WORD
)
if len(field_mandatory_words) > 0:
logger.debug("Checking if account has mandatory words in biography...")
mandatory_words = [
w
for w in field_mandatory_words
if re.compile(r"\b({0})\b".format(w), flags=re.IGNORECASE).search(
cleaned_biography
)
is not None
]
if mandatory_words == []:
logger.info(
f"@{username} mandatory words not found in biography, skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.MISSING_MANDATORY_WORDS
)
if field_specific_alphabet is not None:
logger.debug("Checking primary character set of account biography...")
alphabet = self._find_alphabet(cleaned_biography)
if alphabet not in field_specific_alphabet and alphabet != "":
logger.info(
f"@{username}'s biography alphabet is not in {', '.join(field_specific_alphabet)}. ({alphabet}), skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username, profile_data, SkipReason.ALPHABET_NOT_MATCH
)
if field_bio_language is not None or field_bio_banned_language is not None:
skip_1 = skip_2 = False
logger.debug("Checking main language of account biography...")
language = self._find_language(cleaned_biography)
if (
field_bio_banned_language
and language in field_bio_banned_language
and language != ""
):
logger.info(
f"@{username}'s biography language is in the banned list: {', '.join(field_bio_banned_language)}. ({language}), skip.",
extra={"color": f"{Fore.CYAN}"},
)
skip_1 = True
if (
not skip_1
and field_bio_language
and language not in field_bio_language
and language != ""
):
logger.info(
f"@{username}'s biography language is not in the list: {', '.join(field_bio_language)}. ({language}), skip.",
extra={"color": f"{Fore.CYAN}"},
)
skip_2 = True
if skip_1 or skip_2:
return profile_data, self.return_check_profile(
username,
profile_data,
SkipReason.BIOGRAPHY_LANGUAGE_NOT_MATCH,
)
if field_specific_alphabet is not None:
logger.debug("Checking primary character set of name...")
if profile_data.fullname != "":
alphabet = self._find_alphabet(profile_data.fullname)
if alphabet not in field_specific_alphabet and alphabet != "":
logger.info(
f"@{username}'s name alphabet is not in {', '.join(field_specific_alphabet)}. ({alphabet}), skip.",
extra={"color": f"{Fore.CYAN}"},
)
return profile_data, self.return_check_profile(
username,
profile_data,
SkipReason.ALPHABET_NAME_NOT_MATCH,
)
# If no filters return false, we are good to proceed
return profile_data, self.return_check_profile(username, profile_data, None)
def can_follow_private_or_empty(self) -> bool:
if self.conditions is None:
return False
field_follow_private_or_empty = self.conditions.get(
FIELD_FOLLOW_PRIVATE_OR_EMPTY
)
return field_follow_private_or_empty is not None and bool(
field_follow_private_or_empty
)
def can_pm_to_private_or_empty(self) -> bool:
if self.conditions is None:
return False
field_pm_to_private_or_empty = self.conditions.get(FIELD_PM_TO_PRIVATE_OR_EMPTY)
return field_pm_to_private_or_empty is not None and bool(
field_pm_to_private_or_empty
)
def can_comment(self, current_mode) -> Tuple[bool, bool, bool, bool]:
if self.conditions is not None:
return (
self.conditions.get(FIELD_COMMENT_PHOTOS, True),
self.conditions.get(FIELD_COMMENT_VIDEOS, True),
self.conditions.get(FIELD_COMMENT_CAROUSELS, True),
self.conditions.get("comment_" + current_mode.replace("-", "_"), False),
)
else:
logger.debug("filters.yml (or legacy filter.json) is not loaded!")
return False, False, False, False
def get_all_data(self, device):
profile_picture = device.find(
resourceIdMatches=ResourceID.PROFILE_HEADER_AVATAR_CONTAINER_TOP_LEFT_STUB
)
restricted_profile = device.find(
resourceIdMatches=ResourceID.RESTRICTED_ACCOUNT_TITLE
)
is_restricted = False
if not profile_picture.exists(Timeout.LONG):
if restricted_profile.exists():
is_restricted = True
else:
logger.warning(
"Looks like this profile hasn't loaded yet! Wait a little bit more.."
)
if profile_picture.exists(Timeout.LONG):
logger.info("Profile loaded!")
else:
logger.warning(
"Profile not fully loaded after 16s. Is your connection ok? Let's sleep for 1-2 minutes."
)
random_sleep(60, 120, modulable=False)
if profile_picture.exists():
logger.warning(
"Profile won't load! Maybe you're soft-banned or you've lost your connection!"
)
profileView = ProfileView(device)
if not is_restricted:
profile = Profile(
mutual_friends=self._get_mutual_friends(device, profileView),
follow_button_text=self._get_follow_button_text(device, profileView),
is_restricted=is_restricted,
is_private=self._is_private_account(device, profileView),
has_business_category=self._has_business_category(device, profileView),
posts_count=self._get_posts_count(device, profileView),
biography=self._get_profile_biography(device, profileView),
link_in_bio=self._get_link_in_bio(device, profileView),
fullname=self._get_fullname(device, profileView),
)
followers, following = self._get_followers_and_followings(device)
profile.set_followers_and_following(followers, following)
else:
profile = Profile(
mutual_friends=None,
follow_button_text=None,
is_restricted=is_restricted,
is_private=None,
has_business_category=None,
posts_count=None,
biography=None,
link_in_bio=None,
fullname=None,
)
profile.set_followers_and_following(None, None)
return profile
@staticmethod
def _get_followers_and_followings(
device, profileView: ProfileView = None
) -> Tuple[int, int]:
followers = 0
profileView = ProfileView(device) if profileView is None else profileView
try:
followers = profileView.getFollowersCount()
except Exception as e:
logger.error(f"Cannot find followers count view, default is {followers}.")
logger.debug(f"Error: {e}")
followings = 0
try:
followings = profileView.getFollowingCount()
except Exception as e:
logger.error(f"Cannot find followings count view, default is {followings}.")
logger.debug(f"Error: {e}")
if followers is not None and followings is not None:
return followers, followings
else:
return 0, 1
@staticmethod
def _has_business_category(device, ProfileView=None) -> bool:
business_category_view = device.find(
resourceId=ResourceID.PROFILE_HEADER_BUSINESS_CATEGORY,
)
return business_category_view.exists()
@staticmethod
def _is_private_account(device, profileView: ProfileView = None) -> Optional[bool]:
private = None
profileView = ProfileView(device) if profileView is None else profileView
try:
private = profileView.isPrivateAccount()
except Exception as e:
logger.error("Cannot find whether it is private or not")
logger.debug(f"Error: {e}")
return private
@staticmethod
def _get_profile_biography(device, profileView: ProfileView = None) -> str:
profileView = ProfileView(device) if profileView is None else profileView
return profileView.getProfileBiography()
@staticmethod
def _find_alphabet(biography: str) -> str:
a_dict = {}
max_alph = "UNKNOWN"
try:
for x in range(len(biography)):
if biography[x].isalpha():
a = unicodedata.name(biography[x]).split(" ")[0]
if a not in IGNORE_CHARSETS:
if a in a_dict:
a_dict[a] += 1
else:
a_dict[a] = 1
if bool(a_dict):
max_alph = max(a_dict, key=lambda k: a_dict[k])
except Exception as e:
logger.error(f"Cannot determine primary alphabet. Error: {e}")
return max_alph
@staticmethod
def _find_language(biography: str) -> str:
"""Language detection algorithm is non-deterministic, which means that if you try to run it on a text which is either too short or too ambiguous, you might get different results everytime you run it."""
language = ""
results = []
try:
for _ in range(5):
                # best of 5: run detection several times and keep the most frequent result to reduce the inconsistency
results.append(detect(biography))
language = max(results, key=results.count)
except Exception as e:
logger.error(f"Cannot determine primary language. Error: {e}")
return language
@staticmethod
def _get_fullname(device, profileView: ProfileView = None) -> str:
profileView = ProfileView(device) if profileView is None else profileView
fullname = ""
try:
fullname = profileView.getFullName()
except Exception as e:
logger.error("Cannot find full name.")
logger.debug(f"Error: {e}")
return fullname
@staticmethod
def _get_posts_count(device, profileView: ProfileView = None) -> int:
profileView = ProfileView(device) if profileView is None else profileView
posts_count = 0
try:
posts_count = profileView.getPostsCount()
except Exception as e:
logger.error("Cannot find posts count. Default is 0.")
logger.debug(f"Error: {e}")
return posts_count
@staticmethod
def _get_follow_button_text(device, profileView: ProfileView = None) -> str:
profileView = ProfileView(device) if profileView is None else profileView
_, text = profileView.getFollowButton()
return text
@staticmethod
def _get_mutual_friends(device, profileView: ProfileView = None) -> int:
profileView = ProfileView(device) if profileView is None else profileView
return profileView.getMutualFriends()
@staticmethod
def _get_link_in_bio(device, profileView: ProfileView = None) -> str:
profileView = ProfileView(device) if profileView is None else profileView
return profileView.getLinkInBio()
|
py | 7df8d41723701cb64ac218891fb6a44d97b575f6 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
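# e.g. (sketch): parse_spec('1.2.3.4:8333', 29328) -> (16-byte IPv4-mapped address, 8333),
# while parse_spec('2001:db8::1', 29328) keeps the default port 29328.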
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 29328)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 51474)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
py | 7df8d43c6488495bf76fef17bd52a9cc02e8e4fd | #!/usr/bin/python3
import os
from formula import formula
user = os.environ.get("GIT_USERNAME")
key = os.environ.get("GIT_TOKEN")
repository = os.environ.get("RIT_GITHUB_REPOSITORY")
contribution = os.environ.get("CONTRIBUTION")
formula.Run(user, key, repository, contribution)
|
py | 7df8d4a7b829a079831bf8cacc228e7760fae655 | # %% [markdown]
"""
# Drawing polygons with matplotlib
I want to draw tilings as collections of polygons to be able to color/emphasize certain polygons. Here I explore using matplotlib and shapely to draw polygons.
"""
# %% tags=['hide-cell']
from IPython import get_ipython
if get_ipython() is not None:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# %%
import matplotlib.pyplot as plt
from tilings import base as b
from tilings import utils as u
import shapely.geometry as sg
import shapely.affinity as sa
import numpy as np
from descartes import PolygonPatch
# %%
if get_ipython() is not None:
get_ipython().run_line_magic('matplotlib', 'inline')
# %% [markdown]
"""
## matplotlib
Testing the fill function:
"""
# %%
fig, ax = plt.subplots(figsize=(5, 5))
ax.fill([0, 1, 2, 0], [1, 1, 0, 1], c=plt.cm.Dark2.colors[0])
# %% [markdown]
"""
## shapely
I think shapely will be useful to check that polygons don't overlap.
"""
# %%
polygon1 = sg.Polygon([[0, 0], [1, 0], [0.5, np.sqrt(3)/2]])
polygon2 = sg.Polygon([[1, 0], [0.5, np.sqrt(3)/2], [1.5, np.sqrt(3)/2]])
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_ylim(bottom=0, top=2)
ax.set_xlim(left=0, right=2)
ax.add_patch(PolygonPatch(polygon1, fc=plt.cm.Dark2.colors[0]))
ax.add_patch(PolygonPatch(polygon2, fc=plt.cm.Dark2.colors[1]))
# %%
polygon1.overlaps(polygon2)
# %%
polygon1.touches(polygon2)
# %% [markdown]
"""
Testing drawing a simple tiling:
"""
# %%
verts = [b.Vertex(xy=[0,0]), b.Vertex(xy=[0,1])]
t = b.Tiling(vertices=verts)
# %%
fig, ax = plt.subplots(figsize=(5, 5))
t.draw(ax)
# %% [markdown]
"""
Adding a square to an edge
"""
# %%
edge = [sg.Point([-1,1]), sg.Point([1,-1])]
fig, ax = u.setup_plot(5)
new_pts = [
sa.rotate(edge[1], angle=-90, origin=[edge[0].x,edge[0].y]),
sa.rotate(edge[0], angle=90, origin=[edge[1].x,edge[1].y]),
]
u.draw_pts(ax, edge + new_pts)
# %%
c = u.complete(edge)
fig, ax = u.setup_plot(5)
for p in c:
ax.add_patch(PolygonPatch(p, alpha=0.25))
# %%
|
py | 7df8d4d958e76b33e765b420159ea5bf4b9bc72a | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from iam import Action, Subject, Request
from iam.exceptions import AuthFailedException
from gcloud.iam_auth import IAMMeta
from gcloud.iam_auth import get_iam_client
from gcloud.iam_auth.intercept import ViewInterceptor
iam = get_iam_client()
class AdminSingleActionViewInpterceptor(ViewInterceptor):
def process(self, request, *args, **kwargs):
subject = Subject("user", request.user.username)
action = Action(self.action)
request = Request(IAMMeta.SYSTEM_ID, subject, action, [], {})
allowed = iam.is_allowed(request)
if not allowed:
raise AuthFailedException(IAMMeta.SYSTEM_ID, subject, action, [])
class AdminViewViewInterceptor(AdminSingleActionViewInpterceptor):
action = IAMMeta.ADMIN_VIEW_ACTION
class AdminEditViewInterceptor(AdminSingleActionViewInpterceptor):
action = IAMMeta.ADMIN_EDIT_ACTION
|
py | 7df8d577fb2f31366be23a47f1703d69024644cc | """
Django settings for instaProject project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from decouple import config
import cloudinary
import cloudinary.uploader
import cloudinary.api
import django.conf.global_settings as DEFAULT_SETTINGS
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# cloudinary
# cloudinary.config(
# cloud_name = "sample",
# api_key = "874837483274837",
# api_secret = "a676b67565c6767a6767d6767f676fe1"
# )
ALLOWED_HOSTS = ['*']
# Email configuration; remember to install python-decouple
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
# HMAC
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'instaApp',
'bootstrap3',
'tinymce',
'cloudinary'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'instaProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instaProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'instaclone',
'USER': 'sami_mai',
'PASSWORD': 'SmaiDB',
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Login redirect
LOGIN_REDIRECT_URL = '/'
|
py | 7df8d6480cf34d1f626610354841238cc27ba57c | # flake8: noqa
import warnings
__version__ = '0.34.0'
# explainers
from .explainers.kernel import KernelExplainer, kmeans
from .explainers.sampling import SamplingExplainer
from .explainers.tree import TreeExplainer, Tree
from .explainers.deep import DeepExplainer
from .explainers.gradient import GradientExplainer
from .explainers.linear import LinearExplainer
from .explainers.partition import PartitionExplainer
from .explainers.bruteforce import BruteForceExplainer
from .explainers.permutation import PermutationExplainer
from .explainers.additive import AdditiveExplainer
from .explainers import other
# plotting (only loaded if matplotlib is present)
def unsupported(*args, **kwargs):
warnings.warn("matplotlib is not installed so plotting is not available! Run `pip install matplotlib` to fix this.")
try:
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
if have_matplotlib:
from .plots.summary import summary_plot
from .plots.decision import decision_plot, multioutput_decision_plot
from .plots.dependence import dependence_plot
from .plots.force import force_plot, initjs, save_html
from .plots.image import image_plot
from .plots.monitoring import monitoring_plot
from .plots.embedding import embedding_plot
from .plots.partial_dependence import partial_dependence_plot
from .plots.bar import bar_plot
from .plots.waterfall import waterfall_plot
else:
summary_plot = unsupported
decision_plot = unsupported
multioutput_decision_plot = unsupported
dependence_plot = unsupported
force_plot = unsupported
initjs = unsupported
save_html = unsupported
image_plot = unsupported
monitoring_plot = unsupported
embedding_plot = unsupported
partial_dependence_plot = unsupported
bar_plot = unsupported
# other stuff :)
from . import datasets
#from . import benchmark
from .common import approximate_interactions, hclust_ordering, sample
|
py | 7df8d74aedd52f5af7302871a88888f0fac59e61 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Optional, Union, Iterator
from textwrap import dedent
import numpy as np
from .abstract_hilbert import AbstractHilbert
max_states = np.iinfo(np.int32).max
"""int: Maximum number of states that can be indexed"""
def _is_indexable(shape):
"""
Returns whether a discrete Hilbert space of shape `shape` is
indexable (i.e., its total number of states is below the maximum).
"""
log_max = np.log(max_states)
return np.sum(np.log(shape)) <= log_max
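# For illustration: 40 sites of local dimension 2 give shape (2,)*40, i.e. about
# 2**40 ~ 1.1e12 states > int32 max, so the check above returns False, while
# (2,)*20 (~1e6 states) is still indexable.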
class NoneTypeT:
pass
NoneType = NoneTypeT()
legacy_warn_str = (
"This choice of arguments for `hilbert.random_state` is deprecated and "
"will be removed in a future version.\n"
"The new syntax is\n"
" hilbert.random_state(prngkey, size=1, dtype=jnp.float32)\n"
"and, like in `jax.random`, the first argument is mandatory and must be a valid jax `PRNGKey`."
"Results may differ between the states generated by the legacy, "
"deprecated code and the new one. Note that the old code does not "
"support defining custom methods."
)
class DiscreteHilbert(AbstractHilbert):
"""Abstract class for an hilbert space defined on a lattice.
This class definese the common interface that can be used to
interact with hilbert spaces on lattices.
"""
def __init__(self, shape: Tuple[int, ...]):
"""
Initializes a discrete Hilbert space with a basis of given shape.
Args:
shape: The local dimension of the Hilbert space for each degree
of freedom.
"""
self._shape = shape
super().__init__()
@property
def shape(self) -> Tuple[int, ...]:
r"""The size of the hilbert space on every site."""
return self._shape
@property
def is_finite(self) -> bool:
r"""Whether the local hilbert space is finite."""
raise NotImplementedError( # pragma: no cover
dedent(
f"""
`is_finite` is not implemented for discrete hilbert
space {type(self)}.
"""
)
)
@property
def n_states(self) -> int:
r"""The total dimension of the many-body Hilbert space.
Throws an exception iff the space is not indexable."""
raise NotImplementedError( # pragma: no cover
dedent(
f"""
`n_states` is not implemented for discrete hilbert
space {type(self)}.
"""
)
)
def size_at_index(self, i: int) -> int:
r"""Size of the local degrees of freedom for the i-th variable.
Args:
i: The index of the desired site
Returns:
The number of degrees of freedom at that site
"""
return self.shape[i] # pragma: no cover
def states_at_index(self, i: int) -> Optional[List[float]]:
r"""A list of discrete local quantum numbers at the site i.
If the local states are infinitely many, None is returned.
Args:
i: The index of the desired site.
Returns:
            A list of values or None if there are infinitely many.
"""
raise NotImplementedError() # pragma: no cover
def numbers_to_states(
self, numbers: Union[int, np.ndarray], out: Optional[np.ndarray] = None
) -> np.ndarray:
r"""Returns the quantum numbers corresponding to the n-th basis state
for input n. n is an array of integer indices such that
:code:`numbers[k]=Index(states[k])`.
Throws an exception iff the space is not indexable.
Args:
numbers (numpy.array): Batch of input numbers to be converted into arrays of
quantum numbers.
out: Optional Array of quantum numbers corresponding to numbers.
"""
if out is None:
out = np.empty((np.atleast_1d(numbers).shape[0], self.size))
if np.any(numbers >= self.n_states):
raise ValueError("numbers outside the range of allowed states")
if np.isscalar(numbers):
return self._numbers_to_states(np.atleast_1d(numbers), out=out)[0, :]
else:
return self._numbers_to_states(numbers, out=out)
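    # For an indexable space this is the inverse of states_to_numbers below,
    # i.e. states_to_numbers(numbers_to_states(n)) == n for every valid index n.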
def states_to_numbers(
self, states: np.ndarray, out: Optional[np.ndarray] = None
) -> Union[int, np.ndarray]:
r"""Returns the basis state number corresponding to given quantum states.
The states are given in a batch, such that states[k] has shape (hilbert.size).
Throws an exception iff the space is not indexable.
Args:
states: Batch of states to be converted into the corresponding integers.
out: Array of integers such that out[k]=Index(states[k]).
If None, memory is allocated.
Returns:
numpy.darray: Array of integers corresponding to out.
"""
if states.shape[-1] != self.size:
raise ValueError(
f"Size of this state ({states.shape[-1]}) not"
f"corresponding to this hilbert space {self.size}"
)
states_r = np.asarray(np.reshape(states, (-1, states.shape[-1])))
if out is None:
out = np.empty(states_r.shape[:-1], dtype=np.int64)
out = self._states_to_numbers(states_r, out=out.reshape(-1))
if states.ndim == 1:
return out[0]
else:
return out.reshape(states.shape[:-1])
def states(self) -> Iterator[np.ndarray]:
r"""Returns an iterator over all valid configurations of the Hilbert space.
Throws an exception iff the space is not indexable.
Iterating over all states with this method is typically inefficient,
        and ```all_states``` should be preferred.
"""
for i in range(self.n_states):
yield self.numbers_to_states(i).reshape(-1)
def all_states(self, out: Optional[np.ndarray] = None) -> np.ndarray:
r"""Returns all valid states of the Hilbert space.
Throws an exception if the space is not indexable.
Args:
out: an optional pre-allocated output array
Returns:
            A (n_states x size) batch of states. This corresponds
to the pre-allocated array if it was passed.
"""
numbers = np.arange(0, self.n_states, dtype=np.int64)
return self.numbers_to_states(numbers, out)
@property
def is_indexable(self) -> bool:
"""Whever the space can be indexed with an integer"""
if not self.is_finite:
return False
return _is_indexable(self.shape)
def __mul__(self, other: "DiscreteHilbert"):
if self == other:
return self ** 2
else:
from .tensor_hilbert import TensorHilbert
if type(self) == type(other):
res = self._mul_sametype_(other)
if res is not NotImplemented:
return res
return TensorHilbert(self) * other
|
py | 7df8d785809908ce021e7d12f0d032b33a9592fb | from django.contrib import admin
from apps.heeds.models import Heed
admin.site.register(Heed)
|
py | 7df8d91162731b64e06316dc02d829436559c923 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); |
py | 7df8d9225533d0cef073442a54d22e33dd382b03 | #!/usr/bin/env python3
# Copyright (c) 2012-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/nexolstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
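# For illustration, a minimal .po fragment
#     msgid "Hello"
#     msgstr ""
# parses to [(['"Hello"'], ['""'])]: the quoted fragments are kept verbatim and
# later joined with '\n' when written into the generated C++ array below.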
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *nexol_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("nexolcore", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("nexolcore", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("nexolcore", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("nexolcore", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
py | 7df8da077cfdaf7846ff3ecdc1275aeda51eb77b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Simon Schaefer
# Description : Task aware image downscaling autoencoder model - SCALING.
# Convolutional layers only (no resblocks).
# =============================================================================
import torch
from torch import nn
from tar.modules import _Resblock_, _ReversePixelShuffle_
def build_net():
return CONV_ONLY_LARGE()
class CONV_ONLY_LARGE(nn.Module):
def __init__(self):
super(CONV_ONLY_LARGE, self).__init__()
# Build encoding part.
self._downscaling = nn.Sequential(
nn.Conv2d(3, 8, 3, stride=1, padding=1),
nn.Conv2d(8, 16, 3, stride=1, padding=1),
_ReversePixelShuffle_(downscale_factor=2),
)
self._conv_en1 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_en2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_en3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_en4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_en5 = nn.Conv2d(64, 3, 3, stride=1, padding=1)
# Build decoding part.
self._conv_de1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)
self._conv_de2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_de3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_de4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._conv_de5 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self._upscaling = nn.Sequential(
nn.Conv2d(64, 256, 3, stride=1, padding=1),
nn.PixelShuffle(upscale_factor=2),
nn.Conv2d(64, 3, 3, stride=1, padding=1)
)
def encode(self, x: torch.Tensor) -> torch.Tensor: # b, 3, p, p
x = self._downscaling(x) # b, 64, p/2, p/2
residual = x
x = self._conv_en1(x) # b, 64, p/2, p/2
x = self._conv_en2(x) # b, 64, p/2, p/2
x = torch.add(residual, x) # b, 64, p/2, p/2
x = self._conv_en3(x) # b, 64, p/2, p/2
x = self._conv_en4(x) # b, 64, p/2, p/2
x = torch.add(residual, x) # b, 64, p/2, p/2
x = self._conv_en5(x) # b, 3, p/2, p/2
return x
def decode(self, x: torch.Tensor) -> torch.Tensor:
x = self._conv_de1(x) # b, 64, p/2, p/2
residual = x
x = self._conv_de2(x) # b, 64, p/2, p/2
x = self._conv_de3(x) # b, 64, p/2, p/2
x = torch.add(residual, x) # b, 64, p/2, p/2
x = self._conv_de4(x) # b, 64, p/2, p/2
x = self._conv_de5(x) # b, 64, p/2, p/2
x = torch.add(residual, x) # b, 64, p/2, p/2
x = self._upscaling(x) # b, 3, p, p
return x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.decode(self.encode(x))
|
py | 7df8da09c7147a0737e86745bbe0c69010be33f0 | import numpy as np
import tensorflow as tf
import utils as ut
a = np.zeros([1,13,13,5,4])
a[0,0,0,0] = [0.5,0.5,1,1] # Square with centroid at (0.5,0.5)
# and shape (1,1) -> (w,h)
b = np.zeros([1,13,13,5,4])
b[0,0,0,0] = [-0.5,-0.5,2,2] # Square with centroid at (-0.5,-0.5)
# and shape (2,2) -> (w,h)
bb1 = tf.placeholder(tf.float32,shape=[None,13,13,5,4])
bb2 = tf.placeholder(tf.float32,shape=[None,13,13,5,4])
iou_res = ut.iou(bb1,bb2)
sess = tf.Session()
iou = sess.run(iou_res,feed_dict={bb1:a,bb2:b})
print(iou[0,0,0,0]) # IoU will be ~0.052631
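# Arithmetic behind the expected value: the two boxes overlap on [0,0.5]x[0,0.5]
# (area 0.25); union = 1*1 + 2*2 - 0.25 = 4.75, so IoU = 0.25/4.75 ~ 0.0526.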
|
py | 7df8da6b066a5fa87d81717343c8cb3aa36a7444 | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from easydict import EasyDict
import tensorflow as tf
from lmnet.common import Tasks
from lmnet.networks.object_detection.yolo_v2_quantize import YoloV2Quantize
from lmnet.datasets.lm_things_on_a_table import LmThingsOnATable
from lmnet.data_processor import Sequence
from lmnet.pre_processor import (
ResizeWithGtBoxes,
DivideBy255,
)
from lmnet.post_processor import (
FormatYoloV2,
ExcludeLowScoreBox,
NMS,
)
from lmnet.data_augmentor import (
Brightness,
Color,
Contrast,
FlipLeftRight,
Hue,
SSDRandomCrop,
)
from lmnet.quantizations import (
binary_channel_wise_mean_scaling_quantizer,
linear_mid_tread_half_quantizer,
)
IS_DEBUG = False
NETWORK_CLASS = YoloV2Quantize
DATASET_CLASS = LmThingsOnATable
IMAGE_SIZE = [128, 128]
BATCH_SIZE = 8
DATA_FORMAT = "NCHW"
TASK = Tasks.OBJECT_DETECTION
CLASSES = DATASET_CLASS.classes
KEEP_CHECKPOINT_MAX = 5
MAX_EPOCHS = 1
SAVE_CHECKPOINT_STEPS = 100
TEST_STEPS = 100
SUMMARISE_STEPS = 10
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""
PRE_PROCESSOR = Sequence([
ResizeWithGtBoxes(size=IMAGE_SIZE),
DivideBy255()
])
anchors = [
(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071)
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
FormatYoloV2(
image_size=IMAGE_SIZE,
classes=CLASSES,
anchors=anchors,
data_format=DATA_FORMAT,
),
ExcludeLowScoreBox(threshold=score_threshold),
NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,),
])
NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
_epoch_steps = int(16551 / BATCH_SIZE)
NETWORK.LEARNING_RATE_KWARGS = {
"values": [1e-6, 1e-4, 1e-5, 1e-6, 1e-7],
"boundaries": [_epoch_steps, _epoch_steps * 10, _epoch_steps * 60, _epoch_steps * 90],
}
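# The piecewise-constant schedule above amounts to: 1e-6 for the first epoch
# (warm-up), 1e-4 until epoch 10, 1e-5 until epoch 60, 1e-6 until epoch 90,
# then 1e-7, with the epoch length taken from the 16551 / BATCH_SIZE step count above.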
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.OBJECT_SCALE = 5.0
NETWORK.NO_OBJECT_SCALE = 1.0
NETWORK.CLASS_SCALE = 1.0
NETWORK.COORDINATE_SCALE = 1.0
NETWORK.LOSS_IOU_THRESHOLD = 0.6
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.SCORE_THRESHOLD = score_threshold
NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold
NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size
NETWORK.LOSS_WARMUP_STEPS = int(1280 / BATCH_SIZE)
# quantization
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
'bit': 2,
'max_value': 2.0
}
NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
NETWORK.QUANTIZE_FIRST_CONVOLUTION = True
NETWORK.QUANTIZE_LAST_CONVOLUTION = False
# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
FlipLeftRight(),
Brightness((0.75, 1.25)),
Color((0.75, 1.25)),
Contrast((0.75, 1.25)),
Hue((-10, 10)),
SSDRandomCrop(min_crop_ratio=0.7),
])
|
py | 7df8dbd5f1a10c8d9822bf22b35f198638cac35f | from unittest import TestCase, skip
from pymongo import MongoClient
from passerine.db.driver.mongodriver import Driver
from passerine.db.session import Session
class DbTestCase(TestCase):
default_collection_name = 't3test'
verify_data = False
connection = MongoClient() # used for setup-cleanup operation
session = None
driver = None
def collection_name(self):
return self.default_collection_name
def open_driver(self):
self.driver = Driver({'name': self.collection_name()})
self.driver.connect()
def close_driver(self):
del self.driver
def setUp(self):
self._setUp()
def _setUp(self):
self.connection.drop_database(self.collection_name())
self.open_driver()
self.session = Session(self.driver)
def tearDown(self):
if not self.verify_data:
self.connection.drop_database(self.collection_name())
self.close_driver()
del self.session
def _reset_db(self, data_set):
for data in data_set:
cls = data['class']
repo = self.session.repository(cls)
self.driver.drop_indexes(repo.name)
self.driver.drop(repo.name)
repo.setup_index()
for fixture in data['fixtures']:
self.driver.insert(repo.name, fixture)
if self.verify_data:
for fixture in self.driver.find(repo.name, {}):
print('{}: {}'.format(repo.name, fixture))
def _get_first(self, cls):
repo = self.session.repository(cls)
query = repo.new_criteria('e')
query.limit(1)
return repo.find(query)
def _get_all(self, cls):
repo = self.session.repository(cls)
query = repo.new_criteria('e')
return repo.find(query)
def _find_one_by_name(self, cls, name):
repo = self.session.repository(cls)
query = repo.new_criteria('e')
query.expect('e.name = :name')
query.define('name', name)
query.limit(1)
return repo.find(query)
|
py | 7df8dc5ccc0bec1efbad636db0beddb9ef545fc5 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy
import reformer
import util
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.autograd import Variable
"""
CS224N course project model implementation: Transformer
"""
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def attention(query, key, value, mask=None, dropout=0.0):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
# (Dropout described below)
p_attn = F.dropout(p_attn, p=dropout)
return torch.matmul(p_attn, value), p_attn
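# Shape sketch: with query/key/value of shape (batch, heads, seq_len, d_k), the
# scores and p_attn are (batch, heads, seq_len, seq_len) and the returned context
# is (batch, heads, seq_len, d_k), matching how MultiHeadedAttention uses it below.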
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity we apply the norm first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer function that maintains the same size."
return x + self.dropout(sublayer(self.norm(x)))
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
# Torch linears have a `b` by default.
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.p = dropout
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask, dropout=self.p)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
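        # div_term equals 1 / 10000**(2i/d_model); evaluating it through exp/log of
        # the exponent is what the "log space" comment above refers to.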
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
return self.dropout(x)
class EncoderLayer(nn.Module):
"""
Encoder is made up of two sublayers, self-attn and feed forward (defined below)
b blocks of cnn sublayers, each with c Conv1d
"""
# N=6, d_model=512, d_ff=2048, h=8, dropout=0.1
def __init__(self, size=512, d_ff=2048, h=8, dropout=0.1, kernel = 7, c = 4):
super(EncoderLayer, self).__init__()
self.c = c
self.conv1d = nn.Sequential(
nn.Conv1d(size, size, kernel, bias=True, padding=kernel//2),
nn.ReLU()
)
self.self_attn = MultiHeadedAttention(h, size, dropout)
self.feed_forward = PositionwiseFeedForward(size, d_ff, dropout)
self.sublayer = clones(SublayerConnection(size, dropout), self.c + 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
# convolution
for i in range(self.c):
x = self.conv1d(x.transpose(1,2))
#x = torch.max(x, dim=2)
x = x.transpose(1,2)
x = self.sublayer[i](x, lambda x: x)
x = self.sublayer[self.c](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[self.c+1](x, self.feed_forward)
class TransformerEncoder(nn.Module):
"""
The transformer encoder part described in 'Attention is all you need'
b blocks of cnn sublayers, each with c Conv1d
"""
def __init__(self, hidden_size, N = 1, c = 4):
super(TransformerEncoder, self).__init__()
self.layer = EncoderLayer(size = hidden_size, c = c)
self.layers = clones(self.layer, N)
self.norm = LayerNorm(self.layer.size)
def forward(self, x, mask):
"""
Pass the input (and mask) through each layer in turn.
"""
# haroldmei
mask = torch.unsqueeze(mask, 1)
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
# just copied from BiDAFOutput
class Transformer_Output(nn.Module):
"""
"""
def __init__(self, hidden_size, drop_prob):
super(Transformer_Output, self).__init__()
self.transformer = TransformerEncoder(hidden_size, N = 3)
self.att_linear_1 = nn.Linear(4 * hidden_size, 1)
self.mod_linear_1 = nn.Linear(hidden_size, 1)
self.att_linear_2 = nn.Linear(4 * hidden_size, 1)
self.mod_linear_2 = nn.Linear(hidden_size, 1)
def forward(self, att, mod, mask):
# Shapes: (batch_size, seq_len, 1)
logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)
mod_2 = self.transformer(mod, mask)
logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)
# Shapes: (batch_size, seq_len)
log_p1 = util.masked_softmax(logits_1.squeeze(), mask, log_softmax=True)
log_p2 = util.masked_softmax(logits_2.squeeze(), mask, log_softmax=True)
return log_p1, log_p2
class TransformerEncoderLayerEx(nn.Module):
def __init__(self, d_model, dropout=0.1, c = 4, kernel = 7):
super(TransformerEncoderLayerEx, self).__init__()
self.c = c
self.conv1d = [nn.Sequential(
nn.Conv1d(d_model, d_model, kernel, bias=True, padding=kernel//2).cuda(),
nn.ReLU()
)] * self.c
self.norm3 = [nn.modules.transformer.LayerNorm(d_model)] * self.c
self.dropout3 = [nn.modules.transformer.Dropout(dropout)] * self.c
def forward(self, src, src_mask=None, src_key_padding_mask=None):
for i in range(self.c):
src2 = self.conv1d[i](src.transpose(1,2)).transpose(1,2)
src = src + self.dropout3[i](src2)
src = self.norm3[i](src)
return src
# just copied from BiDAFOutput
class Transformer_OutputEx(nn.Module):
"""
"""
def __init__(self, hidden_size, mod_layers, drop_prob):
super(Transformer_OutputEx, self).__init__()
self.cnn = TransformerEncoderLayerEx(hidden_size,c=2)
self.transformer = nn.modules.transformer.TransformerEncoder(
nn.modules.transformer.TransformerEncoderLayer(hidden_size, 8, dropout=drop_prob),
mod_layers,
nn.modules.transformer.LayerNorm(hidden_size)
)
self.att_linear_1 = nn.Linear(4 * hidden_size, 1)
self.mod_linear_1 = nn.Linear(hidden_size, 1)
self.att_linear_2 = nn.Linear(4 * hidden_size, 1)
self.mod_linear_2 = nn.Linear(hidden_size, 1)
def forward(self, att, mod, mask):
# Shapes: (batch_size, seq_len, 1)
logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)
mod_2 = self.transformer(self.cnn(mod)) #, mask)
logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)
# Shapes: (batch_size, seq_len)
log_p1 = util.masked_softmax(logits_1.squeeze(), mask, log_softmax=True)
log_p2 = util.masked_softmax(logits_2.squeeze(), mask, log_softmax=True)
return log_p1, log_p2
"""
CS224N course project model implementation: Reformer
"""
class ReformerEncoder(nn.Module):
"""
The Reformer encoder part described in ''
"""
def __init__(self, hidden_size, depth = 12, drop_prob=0.1, bucket_size = 16, max_seq_len=512):
super(ReformerEncoder, self).__init__()
self.reformer = reformer.Reformer(
dim = hidden_size,
depth = depth,
bucket_size = bucket_size,
max_seq_len = max_seq_len,
heads = 8,
lsh_dropout = drop_prob,
causal = False
).cuda()
self.bucket_size = bucket_size
def forward(self, x, mask):
x = self.reformer(x)
return x
# just copied from BiDAFOutput
class Reformer_Output(nn.Module):
"""
"""
def __init__(self, hidden_size, drop_prob):
super(Reformer_Output, self).__init__()
self.transformer = ReformerEncoder(hidden_size, depth = 1)
self.att_linear_1 = nn.Linear(4 * hidden_size, 1)
self.mod_linear_1 = nn.Linear(hidden_size, 1)
self.att_linear_2 = nn.Linear(4 * hidden_size, 1)
self.mod_linear_2 = nn.Linear(hidden_size, 1)
def forward(self, att, mod, mask):
# Shapes: (batch_size, seq_len, 1)
logits_1 = self.att_linear_1(att) + self.mod_linear_1(mod)
mod_2 = self.transformer(mod, mask)
logits_2 = self.att_linear_2(att) + self.mod_linear_2(mod_2)
# Shapes: (batch_size, seq_len)
log_p1 = util.masked_softmax(logits_1.squeeze(), mask, log_softmax=True)
log_p2 = util.masked_softmax(logits_2.squeeze(), mask, log_softmax=True)
return log_p1, log_p2 |
py | 7df8de143602185ca5796b8f45cc9cbd84a11965 | #!/usr/bin/env python
# coding: utf-8
"""
File Name: eval_knn.py
Author: Wan Ji
E-mail: [email protected]
Created on: Mon Jan 4 10:36:31 2016 CST
"""
DESCRIPTION = """
"""
import os
import argparse
import logging
import numpy as np
from scipy.io import loadmat
from bottleneck import argpartsort
def runcmd(cmd):
""" Run command.
"""
logging.info("%s" % cmd)
os.system(cmd)
def getargs():
""" Parse program arguments.
"""
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('train_feat', type=str,
help='training features')
parser.add_argument('test_feat', type=str,
help='test features')
parser.add_argument('train_label', type=str,
help='training features')
parser.add_argument('test_label', type=str,
help='test features')
parser.add_argument("--Ks", type=int, nargs='+', default=[1],
help="Ks")
parser.add_argument("--num_test", type=int,
help="number of test images")
parser.add_argument("--dist_type", type=str, default='euclidean',
help="type of distance (euclidean, cosine, dotproduct)")
parser.add_argument("--log", type=str, default="INFO",
help="log level")
return parser.parse_args()
def normalization(feat):
"""
    L2-normalize each feature vector (row) to unit Euclidean length.
"""
feat = feat.reshape(feat.shape[0], -1)
return feat / np.sqrt(np.sum(feat**2, axis=1)).reshape((feat.shape[0], 1))
def knn_classifer(knn_label):
# if knn_label.shape[1] is 1:
# return knn_label[:, 0]
num_label = knn_label.max() + 1
num_test = knn_label.shape[0]
pred = np.empty(num_test, np.int)
for i in xrange(num_test):
label_hist = np.histogram(knn_label[i], range(num_label+1))
pred[i] = label_hist[0].argmax()
return pred
def DotProduct(feat, query):
""" dot product distance.
"""
return -query.dot(feat.T)
def Euclidean(feat, query):
""" Euclidean distance.
"""
(nQ, D) = query.shape
(N, D) = feat.shape
dotprod = query.dot(feat.T)
qryl2norm = (query ** 2).sum(1).reshape(-1, 1)
featl2norm = (feat ** 2).sum(1).reshape(1, -1)
return qryl2norm + featl2norm - 2 * dotprod
def load_feat(feat_path, norm=False):
feat = loadmat(feat_path)['feat']
feat = feat.reshape(feat.shape[0], -1)
if norm:
feat = normalization(feat)
return feat
def main(args):
""" Main entry.
"""
logging.info("Loading features")
# trn_feat = normalization(loadmat(args.train_feat)['feat'])
# tst_feat = normalization(loadmat(args.test_feat)['feat'])
trn_feat = load_feat(args.train_feat).astype(np.float)
tst_feat = load_feat(args.test_feat).astype(np.float)
logging.info("\tDone!")
logging.info("Loading labels")
with open(args.train_label, 'r') as lbf:
trn_label = np.array([int(line.split()[1]) for line in lbf])
with open(args.test_label, 'r') as lbf:
tst_label = np.array([int(line.split()[1]) for line in lbf])
logging.info("\tDone!")
# print trn_feat.shape
# print tst_feat.shape
# print trn_label.shape
# print tst_label.shape
num_test = tst_feat.shape[0]
if args.num_test is not None and args.num_test < tst_feat.shape[0]:
num_test = args.num_test
tst_feat = tst_feat[:num_test]
tst_label = tst_label[:num_test]
logging.info("Computing `{}` distances".format(args.dist_type))
if args.dist_type == "euclidean":
dist = Euclidean(trn_feat, tst_feat)
elif args.dist_type == "cosine":
trn_feat = normalization(trn_feat)
tst_feat = normalization(tst_feat)
dist = DotProduct(trn_feat, tst_feat)
elif args.dist_type == "dotproduct":
dist = DotProduct(trn_feat, tst_feat)
else:
raise Exception("Invalid distance type.")
logging.info("\tDone!")
maxK = min(max(args.Ks), trn_feat.shape[0])
logging.info("Sorting")
idxs = np.empty((num_test, maxK), np.int)
for i in xrange(num_test):
cur_idxs = argpartsort(dist[i], maxK)[:maxK]
idxs[i, :] = cur_idxs[dist[i][cur_idxs].argsort()]
logging.info("\tDone!")
logging.info("Labeling")
knn_label = np.empty((num_test, maxK), np.int)
for i in xrange(num_test):
knn_label[i, :] = trn_label[idxs[i]]
logging.info("\tDone!")
logging.info("Evaluating")
for k in args.Ks:
pred = knn_classifer(knn_label[:, :k])
accy = (pred == tst_label).sum() * 100.0 / tst_label.shape[0]
logging.info("\t%4d-NN classifier: %.2f" % (k, accy))
logging.info("\tDone")
if __name__ == '__main__':
args = getargs()
numeric_level = getattr(logging, args.log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: " + args.log)
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
level=numeric_level)
main(args)
|
py | 7df8de9869cf328d81e180907df0b8283e405c39 | import pytest
def test_tmpdir(tmpdir):
a_dir = tmpdir.mkdir('mytmpdir')
a_file = a_dir.join('tmpfile.txt')
a_file.write('hello, pytest!')
assert a_file.read() == 'hello, pytest!'
@pytest.fixture(scope='module')
def my_tmpdir_factory(tmpdir_factory):
a_dir = tmpdir_factory.mktemp('mytmpdir')
a_file = a_dir.join('tmpfile.txt')
a_file.write('hello, pytest!')
return a_file
|
py | 7df8df401d3e430c29784c8d8c3cf499a6a5bb4d | from os import environ
ENVIRONMENT_SETTINGS = [
"DATABASE_PASSWORD",
"VIRTUALENV_NAME",
]
def env_settings():
d = {}
for s in ENVIRONMENT_SETTINGS:
if s in environ:
d[s] = environ[s]
return d
|
py | 7df8e0fa0f31bd1bfb08fce73a3ae84a3fa24ac6 | import boto3
import json
import os
# Global variables are reused across execution contexts (if available)
session = boto3.Session()
def lambda_handler(event, context):
"""
AWS Lambda handler
Parameters
----------
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
# api-gateway-simple-proxy-for-lambda-output-format
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
message = get_message()
return {
"statusCode": 200,
"body": json.dumps(message)
}
def get_message():
return {"hello": "world"}
|
py | 7df8e254c21afe736806699847d02f1408c89d00 | import os
import time
from os.path import dirname, join
import shutil
import json
import numpy as np
from scipy.cluster.hierarchy import fcluster
from scipy.spatial.distance import squareform, pdist
import pandas as pd
try:
import matplotlib
import seaborn as sns
sns.set(context="paper", font="monospace")
MATPLOTLIB_INSTALLED = True
except:
MATPLOTLIB_INSTALLED = False
try:
import requests
REQUESTS_INSTALLED = True
except:
REQUESTS_INSTALLED = False
from clustermatch.utils.misc import get_temp_file_name
RESULTS_DIR = 'results'
def _get_condensed_distance_matrix(ensemble):
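    # The distance between two objects (columns of `ensemble`) is the fraction of
    # ensemble partitions that place them in different clusters; 1 minus this value
    # is the co-association used by save_coassociation_matrix below.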
return pdist(ensemble.T, lambda u, v: (u != v).sum() / len(u))
def get_timestamp():
return time.strftime('%Y%m%d_%H%M%S')
def setup_results_dir(func):
def func_wrapper(*args, **kwargs):
results_dir = os.path.join(RESULTS_DIR, kwargs['timestamp'])
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
return func(*args, **kwargs)
return func_wrapper
def get_clustergrammer_link(square_matrix, names):
if not REQUESTS_INSTALLED:
raise ValueError('requests is not installed')
# save sim matrix as csv and get clustergrammer visualization
df = pd.DataFrame(square_matrix)
df['names'] = names
df = df.set_index('names')
df.columns = names
square_matrix_file = get_temp_file_name('txt')
df.to_csv(square_matrix_file, sep='\t', encoding='utf-8')
clustergrammer_link = ''
try:
upload_url = 'http://amp.pharm.mssm.edu/clustergrammer/matrix_upload/'
r = requests.post(upload_url, files={'file': open(square_matrix_file, 'rb')})
clustergrammer_link = r.text
except:
pass
return clustergrammer_link
@setup_results_dir
def to_binary(data, file_name, timestamp):
file_path = os.path.join(RESULTS_DIR, timestamp, file_name + '.pkl')
pd.to_pickle(data, file_path)
def from_binary(file_name):
return pd.read_pickle(file_name)
@setup_results_dir
def create_partition_plot_html(partition, timestamp, sources=None):
results_dir = os.path.join(RESULTS_DIR, timestamp)
html_dir = join(dirname(__file__), 'html')
for afile in os.listdir(html_dir):
afile_path = join(html_dir, afile)
shutil.copy(afile_path, results_dir)
if sources is not None:
sources = np.array(sources)
else:
sources = np.array([''] * len(partition))
# FIXME: create json and copy
json_path = os.path.join(RESULTS_DIR, timestamp, 'data' + '.json')
cluster_children = []
k_values = np.unique(partition)
for cluster_number in k_values:
idx = (partition == cluster_number).values
cluster_objects = partition[idx].index.tolist()
cluster_objects_sources = sources[idx]
cluster_children.append(
{'name': '',
'children': [{'name': obj_name, 'source': obj_source, 'size': 1}
for obj_name, obj_source in zip(cluster_objects, cluster_objects_sources)]
}
)
partition_json = {'name': '', 'children': cluster_children}
with open(json_path, 'w') as jf:
json.dump(partition_json, jf)
return join(results_dir, 'index.html')
@setup_results_dir
def save_ensembles(ensembles, timestamp):
for ens_name, ens in ensembles.items():
ensemble_path = os.path.join(RESULTS_DIR, timestamp, ens_name + '_ensemble' + '.xls')
ens.to_excel(ensemble_path, encoding='utf-8')
@setup_results_dir
def append_data_description(text, timestamp):
filepath = os.path.join(RESULTS_DIR, timestamp, 'README.txt')
with open(filepath, 'a') as f:
f.write('\n')
f.write(text + '\n')
@setup_results_dir
def write_text_file(text_content, file_name, timestamp):
filepath = os.path.join(RESULTS_DIR, timestamp, file_name)
with open(filepath, 'w') as f:
f.write(text_content)
@setup_results_dir
def write_data_description(data_files, merged_sources, feature_names, sources_names, timestamp):
filepath = os.path.join(RESULTS_DIR, timestamp, 'README.txt')
with open(filepath, 'w') as f:
f.write('Data files included:\n')
for afile in data_files:
f.write(' - {}\n'.format(afile))
f.write('\n')
f.write('Merged sources shape: {}\n'.format(merged_sources.shape))
f.write('Number of features: {} ({} unique)\n'.format(len(feature_names), len(set(feature_names))))
f.write('Number of sources: {}\n'.format(len(set(sources_names))))
@setup_results_dir
def save_excel(dataframe, filename, timestamp):
filepath = os.path.join(RESULTS_DIR, timestamp, filename + '.xlsx')
dataframe.to_excel(filepath, encoding='utf-8')
def save_partitions_simple(partitions, partitions_path, extra_columns=None, columns_order=None, sort_by_columns=None):
if extra_columns is not None:
extra_df = pd.DataFrame(extra_columns, index=partitions.index)
partitions = pd.concat([partitions, extra_df], axis=1)
if columns_order is not None:
partitions = partitions[columns_order]
if sort_by_columns is not None:
partitions = partitions.sort_values(sort_by_columns)
partitions.to_excel(partitions_path, encoding='utf-8')
@setup_results_dir
def save_partitions(partitions, timestamp, **kwargs):
partitions_path = os.path.join(RESULTS_DIR, timestamp, 'partitions' + '.xls')
save_partitions_simple(partitions, partitions_path, **kwargs)
return partitions_path
@setup_results_dir
def save_coassociation_matrix(ensemble, partition, timestamp, columns_order=None, image_format='pdf'):
if not MATPLOTLIB_INSTALLED:
raise ValueError('matplotlib is not installed')
condensed_matrix = _get_condensed_distance_matrix(ensemble)
full_matrix = squareform(condensed_matrix)
full_matrix = 1 - full_matrix
np.fill_diagonal(full_matrix, 0.0)
tomato_accessions = ensemble.columns
if columns_order is None:
ordering_partition = partition.columns[0]
else:
ordering_partition = columns_order[0]
partition_idxs_sorted = np.argsort(partition[ordering_partition])
sim_matrix_sorted = full_matrix[partition_idxs_sorted, :]
sim_matrix_sorted = sim_matrix_sorted[:, partition_idxs_sorted]
tomato_accessions_sorted = [tomato_accessions[idx] for idx in partition_idxs_sorted]
df = pd.DataFrame(sim_matrix_sorted, index=tomato_accessions_sorted, columns=tomato_accessions_sorted)
# fig = sns.plt.figure(figsize=(100, 100))
ax = sns.heatmap(df, square=True, linewidths=.05, cmap='Blues', cbar=None)
sns.plt.yticks(rotation=0)
sns.plt.xticks(rotation=90)
font_scale = (35.0 / len(tomato_accessions))
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(label.get_fontsize() * font_scale)
sim_matrix_path = os.path.join(RESULTS_DIR, timestamp, 'coasociation_matrix.' + image_format)
sns.plt.savefig(sim_matrix_path, dpi=300, bbox_inches='tight')
sns.plt.close()
sim_matrix_csv_path = os.path.join(RESULTS_DIR, timestamp, 'coasociation_matrix.csv')
df.to_csv(sim_matrix_csv_path)
return sim_matrix_path, full_matrix, sim_matrix_csv_path
@setup_results_dir
def save_similarity_matrix(partition, feature_names, sim_matrix, timestamp, image_format='pdf'):
if not MATPLOTLIB_INSTALLED:
raise ValueError('matplotlib is not installed')
partition_idxs_sorted = np.argsort(partition)
sim_matrix_sorted = sim_matrix[partition_idxs_sorted, :]
sim_matrix_sorted = sim_matrix_sorted[:, partition_idxs_sorted]
feature_names_sorted = [feature_names[idx] for idx in partition_idxs_sorted]
df = pd.DataFrame(sim_matrix_sorted, index=feature_names_sorted, columns=feature_names_sorted)
ax = sns.heatmap(df, square=True, linewidths=.05)
sns.plt.yticks(rotation=0)
sns.plt.xticks(rotation=90)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(1.5)
sim_matrix_path = os.path.join(RESULTS_DIR, timestamp, 'similarity_matrix.' + image_format)
sns.plt.savefig(sim_matrix_path, dpi=300, bbox_inches='tight')
return sim_matrix_path
@setup_results_dir
def save_reps_comparison(reps_comparison, timestamp):
reps_comparison_csv = os.path.join(RESULTS_DIR, timestamp, 'reps_comparison.csv')
reps_comparison.to_csv(reps_comparison_csv)
return reps_comparison_csv
@setup_results_dir
def save_clustermap(sim_matrix, feature_names, sources_names, partition_linkage, timestamp, image_format='pdf'):
if not MATPLOTLIB_INSTALLED:
raise ValueError('matplotlib is not installed')
font_scale = (50.0 / len(feature_names))
# sns.set(font_scale=font_scale)
fig = sns.plt.figure(figsize=(100, 100))
df = pd.DataFrame(sim_matrix, index=feature_names, columns=feature_names)
part00_k = 4
part00 = fcluster(partition_linkage, part00_k, criterion='maxclust') - 1
part01_k = 10
part01 = fcluster(partition_linkage, part01_k, criterion='maxclust') - 1
part02_k = 15
part02 = fcluster(partition_linkage, part02_k, criterion='maxclust') - 1
part03_k = 20
part03 = fcluster(partition_linkage, part03_k, criterion='maxclust') - 1
part00_colors = sns.color_palette('pastel', len(np.unique(part00)))
part01_colors = sns.color_palette('pastel', len(np.unique(part01)))
part02_colors = sns.color_palette('pastel', len(np.unique(part02)))
part03_colors = sns.color_palette('pastel', len(np.unique(part03)))
df_colors = pd.DataFrame(index=df.index)
df_colors['$k$={0}'.format(part00_k)] = [part00_colors[i] for i in part00]
df_colors['$k$={0}'.format(part01_k)] = [part01_colors[i] for i in part01]
df_colors['$k$={0}'.format(part02_k)] = [part02_colors[i] for i in part02]
df_colors['$k$={0}'.format(part03_k)] = [part03_colors[i] for i in part03]
cm = sns.clustermap(df, col_linkage=partition_linkage, row_linkage=partition_linkage,
row_colors=df_colors, col_colors=df_colors)
sns.plt.setp(cm.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
sns.plt.setp(cm.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
ax = cm.cax
pos1 = ax.get_position() # get the original position
pos2 = [pos1.x0 + 0.79, pos1.y0 - 0.02, pos1.width, pos1.height]
ax.set_position(pos2) # set a new position
unique_sources = list(set(sources_names))
sources_colors = sns.color_palette('hls', len(unique_sources))
ylabels = [x for x in cm.ax_heatmap.get_yticklabels()]
ylabels_text = [x.get_text() for x in ylabels]
leg_fig = cm.ax_heatmap.get_yticklabels()[0].figure
for label in cm.ax_heatmap.get_xticklabels():
label_idx = feature_names.index(label.get_text())
label_source = sources_names[label_idx]
color_idx = unique_sources.index(label_source)
label.set_color(sources_colors[color_idx])
label.set_fontsize(label.get_fontsize() * font_scale)
ylabel = ylabels[ylabels_text.index(label.get_text())]
ylabel.set_color(sources_colors[color_idx])
ylabel.set_fontsize(ylabel.get_fontsize() * font_scale)
legend_labels = unique_sources
legend_patches = [matplotlib.patches.Patch(color=C, label=L) for
C, L in zip(sources_colors, legend_labels)]
leg_fig.legend(handles=legend_patches, labels=legend_labels, loc='lower right', bbox_to_anchor=(0.970, 0.05))
clustermap_path = os.path.join(RESULTS_DIR, timestamp, 'clustermap.' + image_format)
cm.savefig(clustermap_path, dpi=300, bbox_inches='tight')
sns.plt.close()
clustermap_csv_path = os.path.join(RESULTS_DIR, timestamp, 'clustermap.' + 'csv')
df.to_csv(clustermap_csv_path)
return clustermap_path, clustermap_csv_path
|
py | 7df8e40c0ca8d6db423853b6fccd01dc28f10f48 | #!/usr/bin/env python
# pylint: disable=invalid-name,bare-except
# A tool to perform various operations on Jira
import os
import io
import sys
import re
import csv
import argparse
import netrc
import logging
import textwrap
import pprint
from collections import Counter
from tabulate import tabulate
import json
from utils import get_jira_client, get_login_credentials
FIELD_SPRINT = 'customfield_12310940'
FIELD_CONTRIBUTORS = 'customfield_12315950'
MAX_RESULTS = 100
TRIAGE_MAX_RESULTS = 300
SEARCH_QUERY_EPICS = 'project = MGMT AND component = "MGMT OCP Metal" AND type = Epic AND filter = '
CURRENT_VERSION_FILTER = '12347072'
NEXT_VERSION_FILTER = '12347073'
VALID_PRINT_FIELDS = ['key', 'summary', 'component', 'priority', 'status', 'assignee', 'fixVersion', 'sprint']
DEFAULT_PRINT_FIELDS = ['component', 'priority', 'status', 'assignee']
PERMENANT_PRINT_FIELDS = ['key', 'summary']
TEAM_COMPONENT_PREFIX = 'AI-Team'
NONE_EDGE_COMPONENTS = ['MGMT Integration']
PROJECT_LABELS = ['KNI-EDGE-4.8', 'KNI-EDGE-4.8-DAY2']
ADMIN_ROLE_ID = '10002'
logging.basicConfig(level=logging.WARN, format='%(levelname)-10s %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("__main__").setLevel(logging.INFO)
isVerbose = False
isDryRun = False
def log_exception(msg):
if isVerbose:
logger.exception(msg)
else:
logger.error(msg)
def jira_netrc_login(server, netrcFile):
cred = netrc.netrc(os.path.expanduser(netrcFile))
username, _, password = cred.authenticators(server)
logger.info("log-in with username: %s", username)
return username, password
def get_raw_field(issue, fieldName):
try:
return issue.fields.__dict__[fieldName]
except Exception:
return None
def get_sprint_name(issue):
sprint_str = get_raw_field(issue, FIELD_SPRINT)
if not sprint_str:
return None
m = re.findall(r',name=([^,]+),', sprint_str[0])
if not m:
return None
return m[0]
def get_sprint_id(issue):
sprint_str = get_raw_field(issue, FIELD_SPRINT)
if not sprint_str:
return None
m = re.findall(r',sequence=([0-9]+),', sprint_str[0])
if not m:
return None
return int(m[0])
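# NOTE (added comment): the two helpers above assume Jira's "greenhopper" sprint custom
# field, which comes back as a list of opaque strings that typically look like
#   com.atlassian.greenhopper.service.sprint.Sprint@1a2b[id=123,...,name=Sprint 42,...,sequence=987,...]
# The regexes only pull out the ",name=...," and ",sequence=...," fragments; the exact
# layout of this string depends on the Jira/GreenHopper version.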
def get_assignee(issue):
try:
return issue.fields.assignee.displayName
except Exception:
try:
return issue.raw['fields']['assignee']['displayName']
except Exception:
return "Unassigned"
def format_key_for_print(key, isMarkdown):
if not isMarkdown:
return key
return f"[{key}](https://issues.redhat.com/browse/{key})"
def get_data_for_print(issues, issues_count=None, print_fields=None):
if not print_fields:
print_fields = DEFAULT_PRINT_FIELDS
headers = PERMENANT_PRINT_FIELDS + print_fields
if issues_count:
headers.append('count')
table = []
for i in issues:
row = {}
if 'key' in headers:
row['key'] = i.key
if 'summary' in headers:
row['summary'] = i.fields.summary
if 'component' in headers:
row['component'] = [c.name for c in i.fields.components]
if 'priority' in headers:
try:
row['priority'] = i.fields.priority.name
except Exception:
row['priority'] = i.fields.priority
if 'status' in headers:
row['status'] = str(i.fields.status)
if 'fixVersion' in headers:
row['fixVersion'] = "" if len(i.fields.fixVersions) == 0 else i.fields.fixVersions[0].name
if 'assignee' in headers:
assignee = get_assignee(i)
row['assignee'] = assignee
if 'sprint' in headers:
sid = get_sprint_id(i)
if sid:
row['sprint'] = "({}) {}".format(sid, get_sprint_name(i))
else:
row['sprint'] = ""
if issues_count:
row['count'] = issues_count[i.key]
table.append(row)
return headers, table
def print_report_csv(issues, issues_count=None, print_fields=None):
headers, data = get_data_for_print(issues, issues_count=issues_count,
print_fields=print_fields)
output = io.StringIO()
csvout = csv.DictWriter(output, delimiter='|', fieldnames=headers)
csvout.writeheader()
for row in data:
csvout.writerow(row)
return output.getvalue()
def print_report_json(issues, issues_count=None, print_fields=None):
_, data = get_data_for_print(issues, issues_count=issues_count,
print_fields=print_fields)
return json.dumps(data)
def print_report_table(issues, isMarkdown=False, issues_count=None, print_fields=None):
_, data = get_data_for_print(issues, issues_count=issues_count,
print_fields=print_fields)
fmt = "github" if isMarkdown else "psql"
return tabulate(data, headers="keys", tablefmt=fmt)
def print_raw(issues):
for i in issues:
pprint.pprint(i.raw)
class JiraTool():
def __init__(self, j, maxResults=MAX_RESULTS):
self._jira = j
self._maxResults = maxResults
self._admin_in_projects = {}
def jira(self):
return self._jira
def is_admin_in_project(self, project):
try:
return self._admin_in_projects[project]
except Exception:
pass
is_admin = False
try:
is_admin = self._jira.my_permissions(project)['permissions']['PROJECT_ADMIN']['havePermission']
except Exception:
log_exception("Cannot get permissions for project {}".format(project))
self._admin_in_projects[project] = is_admin
return is_admin
def link_tickets(self, ticket, to_ticket):
try:
logger.info("linking %s to %s", to_ticket.key, ticket.key)
res = self._jira.create_issue_link("relates to", ticket, to_ticket)
res.raise_for_status()
except Exception:
            logger.exception("Error linking to %s", to_ticket.key)
def update_issue_fields(self, issue, fields_dict):
if isDryRun:
print("Updating issue {} with fields: {}".format(issue.key, fields_dict))
else:
issue.update(fields=fields_dict, notify=self.is_admin_in_project(issue.fields.project.key))
def add_assignee_as_contributor(self, ticket):
try:
assignee = ticket.fields.assignee
if not assignee:
logger.debug("Ticket %s is not assigned", ticket.key)
return
contributors = get_raw_field(ticket, FIELD_CONTRIBUTORS)
if not contributors:
contributors = []
contributor_names = [u.name for u in contributors]
if assignee.name in contributor_names:
logger.debug("%s is already contributor of %s", assignee.name, ticket.key)
return
contributor_names.append(assignee.name)
logger.info("Adding %s as contributor to %s", assignee.name, ticket.key)
self.update_issue_fields(ticket, {FIELD_CONTRIBUTORS: [{'name': u} for u in contributor_names]})
except Exception:
logger.exception("Error adding contributor to %s", ticket.key)
def add_watchers(self, ticket, watchers):
try:
for watcher in watchers:
logger.info("Adding %s as watcher to %s", watcher, ticket.key)
self._jira.add_watcher(ticket.key, watcher)
except Exception:
logger.exception("Error adding watcher to %s", ticket.key)
def remove_watchers(self, ticket, watchers):
try:
for watcher in watchers:
logger.info("removing %s as watcher from %s", watcher, ticket.key)
self._jira.remove_watcher(ticket.key, watcher)
except Exception:
logger.exception("Error removing watcher to %s", ticket.key)
@staticmethod
def get_team_component(issue):
for c in issue.fields.components:
if c.name.startswith(TEAM_COMPONENT_PREFIX):
return c
return None
@staticmethod
    # Returns the components from the provided list that are already assigned to the issue
def get_existing_components(issue, components):
existing = []
for c in issue.fields.components:
for rc in components:
if rc == c.name:
existing.append(rc)
return existing
@staticmethod
def get_project_labels(issue):
labels = []
for label in issue.fields.labels:
if label in PROJECT_LABELS:
labels.append(label)
return labels
    # There can only be one team component for an issue, so adding
    # a team component will replace the previous team component
def add_component(self, issue, component):
is_team_component = False
if component.startswith(TEAM_COMPONENT_PREFIX):
is_team_component = True
names = []
for c in issue.fields.components:
if c.name == component:
logger.debug("%s, is already in %s", component, issue.key)
return
if is_team_component and c.name.startswith(TEAM_COMPONENT_PREFIX):
logger.info("Removing team component %s from %s", c.name, issue.key)
else:
names.append({'name': c.name})
names.append({'name': component})
logger.info("add_component: updating %s with components: %s", issue.key, names)
self.update_issue_fields(issue, {"components": names})
def remove_component(self, issue, component):
was_found = False
names = []
for c in issue.fields.components:
if c.name == component:
logger.info("removing %s from %s", component, issue.key)
was_found = True
else:
names.append({'name': c.name})
if not was_found:
logger.debug("remove_component: component %s not found in %s", component, issue.key)
else:
logger.info("remove_component: updating %s with components: %s", issue.key, names)
self.update_issue_fields(issue, {"components": names})
def add_labels(self, issue, labels):
new_labels = labels.copy()
for label in issue.fields.labels:
if label in labels:
logger.debug("%s, is already in %s", label, issue.key)
else:
new_labels.append(label)
if new_labels != issue.fields.labels:
logger.info("add_labels: updating %s with labels: %s", issue.key, new_labels)
self.update_issue_fields(issue, {"labels": new_labels})
def remove_labels(self, issue, labels):
was_found = False
new_labels = []
for label in issue.fields.labels:
if label in labels:
logger.info("removing %s from %s", label, issue.key)
was_found = True
else:
new_labels.append(label)
if not was_found:
logger.debug("remove_labels: labels %s not found in %s", labels, issue.key)
else:
logger.info("remove_labels: updating %s with labels: %s", issue.key, new_labels)
self.update_issue_fields(issue, {"labels": new_labels})
def remove_comment(self, issue, identifying_string):
was_found = False
comments = self._jira.comments(issue.key)
for comment in comments:
if identifying_string in comment.body:
was_found = True
comment.delete()
logger.info("remove_comment: removing comment '%s' from %s", identifying_string, issue.key)
print("Removing comment: {}".format(comment.body))
if not was_found:
logger.debug("remove_comment: comment '%s' not found in %s", identifying_string, issue.key)
def get_selected_linked_issues(self, issues):
linked_issue_keys = []
linked_issues = []
for i in issues:
for label in i.fields.issuelinks:
linked_issue_keys.append(self.extract_linked_issue(label).key)
issue_keys_count = Counter(linked_issue_keys)
if linked_issue_keys:
linked_issues = self._jira.search_issues("project != AITRIAGE AND issue in (%s)" % (",".join(set(linked_issue_keys))),
maxResults=self._maxResults)
# remove triaging tickets from the list
filtered_linked_issues = []
for i in linked_issues:
if "Assisted-installer Triage" not in [c.name for c in i.fields.components]:
filtered_linked_issues.append(i)
return filtered_linked_issues, issue_keys_count
def get_selected_issues(self, issues, isEpicTasks=False, onlyMgmtIssues=False):
if not isEpicTasks:
return issues
extra_filter = ""
if onlyMgmtIssues:
extra_filter = ' and project = MGMT'
return self._jira.search_issues("\"Epic Link\" in (%s) %s" % (",".join([i.key for i in issues]), extra_filter),
maxResults=self._maxResults)
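    # (added comment) A Jira issue link exposes either `outwardIssue` or `inwardIssue`,
    # depending on the direction of the link relative to the queried issue; the helper
    # below returns whichever side is present.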
@staticmethod
def extract_linked_issue(issue_link):
try:
return issue_link.outwardIssue
except Exception:
return issue_link.inwardIssue
def remove_links(self, ticket, to_remove):
for link in ticket.fields.issuelinks:
key = self.extract_linked_issue(link).key
if key == to_remove.key:
try:
logger.info("removing link %s", key)
link.delete()
except Exception:
log_exception("Error removing link {}".format(key))
def get_issues_in_epic(self, key):
issues = self._jira.search_issues("\"Epic Link\" in (%s)" % key, maxResults=self._maxResults)
if len(issues) == 0:
epic = self._jira.issue(key)
issues = [self._jira.issue(s.key) for s in epic.fields.subtasks]
return issues
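# Illustrative usage sketch of JiraTool (added; never called by this tool). The epic and
# issue keys below are hypothetical placeholders, and a connected `jira_client` is assumed.
def _example_jiratool_usage(jira_client):
    tool = JiraTool(jira_client)
    target = tool.jira().issue('MGMT-5678')  # hypothetical ticket key
    for issue in tool.get_issues_in_epic('MGMT-1234'):  # hypothetical epic key
        tool.add_assignee_as_contributor(issue)
        tool.link_tickets(issue, target)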
class buildEpicFilterAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super().__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, SEARCH_QUERY_EPICS + values)
def filter_issue_status(issues, statuses):
if not statuses:
return issues
filtered_issues = []
for i in issues:
if i.fields.status.name in statuses:
filtered_issues.append(i)
return filtered_issues
def epic_fixup(jtool, epic_list):
for epic in epic_list:
if epic.fields.issuetype.name != 'Epic':
logger.debug("Issue %s is not an Epic", epic.key)
continue
logger.info("Fixing epic %s", epic.key)
jtool.add_assignee_as_contributor(epic)
team = jtool.get_team_component(epic)
project_labels = jtool.get_project_labels(epic)
epic_issues = jtool.get_selected_issues([epic], isEpicTasks=True, onlyMgmtIssues=True)
if team:
for i in epic_issues:
if not jtool.get_existing_components(i, NONE_EDGE_COMPONENTS):
jtool.add_component(i, team.name)
else:
# should remove any Team Component from the epic tasks?
pass
if project_labels:
for i in epic_issues:
jtool.add_labels(i, project_labels)
missing_project_labels = [label for label in PROJECT_LABELS if label not in project_labels]
if missing_project_labels:
for i in epic_issues:
jtool.remove_labels(i, missing_project_labels)
def handle_component_update(args, jiraTool, issues):
if args.add_component is not None:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.add_component(i, args.add_component)
if args.remove_component is not None:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.remove_component(i, args.remove_component)
def handle_labels_update(args, jiraTool, issues):
if args.add_labels is not None:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.add_labels(i, args.add_labels)
if args.remove_labels is not None:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.remove_labels(i, args.remove_labels)
def handle_remove_comment(args, jiraTool, issues):
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.remove_comment(i, args.remove_comment)
def handle_watchers_update(args, jiraTool, issues):
if args.add_watchers is not None:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.add_watchers(i, args.add_watchers)
if args.remove_watchers is not None:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.remove_watchers(i, args.remove_watchers)
def handle_link_update(args, jiraTool, issues):
if args.link_to is not None:
to_ticket = jiraTool.jira().issue(args.link_to)
logger.info("Linking search result to %s", to_ticket.key)
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.link_tickets(i, to_ticket)
if args.remove_link is not None:
to_remove = jiraTool.jira().issue(args.remove_link)
logger.info("Removing link to %s from search result to: ", to_remove)
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.remove_links(i, to_remove)
def handle_fix_version_update(args, jiraTool, issues):
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
if len(i.fields.fixVersions) != 1 or args.fix_version != i.fields.fixVersions[0].name:
if i.fields.status.name in ["Closed"]:
logger.info("Not changing fixVersion of %s because it is %s", i.key, i.fields.status)
continue
logger.info("setting fixVersion %s to issue %s", args.fix_version, i.key)
i.fields.fixVersions = [{'name': args.fix_version}]
try:
jiraTool.update_issue_fields(i, {'fixVersions': i.fields.fixVersions})
except Exception:
log_exception("Could not set fixVersion of {}".format(i.key))
else:
logger.info("issue %s is already at fixVersion %s", i.key, args.fix_version)
def handle_sprint_update(args, jiraTool, issues):
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
sprint_value = get_sprint_id(i)
if sprint_value == args.sprint:
logger.debug("Issue %s is already at sprint %s", i.key, args.sprint)
continue
logger.info("setting sprint %s to issue %s", args.sprint, i.key)
try:
jiraTool.update_issue_fields(i, {FIELD_SPRINT: args.sprint})
except Exception:
log_exception("Could not set sprint of {}".format(i.key))
def main(args):
username, password = get_login_credentials(args.user_password)
j = get_jira_client(
access_token=args.jira_access_token,
username=username,
password=password,
netrc_file=args.netrc,
)
max_results = args.max_results
if args.max_results == MAX_RESULTS and args.linked_issues:
logger.debug("Increasing the Jira maxResults param to %s", TRIAGE_MAX_RESULTS)
max_results = TRIAGE_MAX_RESULTS
jiraTool = JiraTool(j, maxResults=max_results)
if args.issue is not None:
issues = [jiraTool.jira().issue(args.issue)]
elif args.bz_issue is not None:
issues = jiraTool.jira().search_issues('project = OCPBUGSM AND component = assisted-installer and summary ~ "{}"'.format(args.bz_issue))
else:
issues = jiraTool.jira().search_issues(args.search_query, maxResults=args.max_results)
if args.sprint:
handle_sprint_update(args, jiraTool, issues)
sys.exit()
if args.epic_fixup:
epic_fixup(jiraTool, jiraTool.get_selected_issues(issues))
sys.exit()
if args.update_contributors:
for i in jiraTool.get_selected_issues(issues, args.epic_tasks):
jiraTool.add_assignee_as_contributor(i)
sys.exit()
if args.add_labels is not None or args.remove_labels is not None:
handle_labels_update(args, jiraTool, issues)
sys.exit()
if args.remove_comment is not None:
handle_remove_comment(args, jiraTool, issues)
if args.add_component is not None or args.remove_component is not None:
handle_component_update(args, jiraTool, issues)
sys.exit()
if args.add_watchers is not None or args.remove_watchers is not None:
handle_watchers_update(args, jiraTool, issues)
sys.exit()
if args.link_to is not None or args.remove_link is not None:
handle_link_update(args, jiraTool, issues)
sys.exit()
if args.fix_version is not None:
handle_fix_version_update(args, jiraTool, issues)
sys.exit()
issues_count = None
if args.linked_issues:
issues, issues_count = jiraTool.get_selected_linked_issues(issues)
else:
issues = jiraTool.get_selected_issues(issues, args.epic_tasks)
if args.print_raw:
print_raw(issues)
if args.print_report:
return print_report_table(filter_issue_status(issues, args.include_status), issues_count=issues_count,
print_fields=args.print_field)
elif args.print_markdown_report:
return print_report_table(filter_issue_status(issues, args.include_status),
isMarkdown=True, issues_count=issues_count,
print_fields=args.print_field)
elif args.print_json:
return print_report_json(filter_issue_status(issues, args.include_status),
issues_count=issues_count,
print_fields=args.print_field)
elif args.print_csv_report:
return print_report_csv(filter_issue_status(issues, args.include_status), issues_count=issues_count,
print_fields=args.print_field)
return ""
def build_parser():
VALID_STATUS = ['QE Review', 'To Do', 'Done', 'Obsolete', 'Code Review', 'In Progress']
helpDescription = textwrap.dedent("""
Tool to help perform common operations on Jira tickets
    Use it to select Jira tickets using one of the selection arguments,
    then perform one of the supported operations on the selected tickets.
""")
helpEpilog = textwrap.dedent("""
Usage examples:
\tChange the fixVersion of STOR-1111 to v1.1.1:
\t\tjira_cmd.py -i STOR-1111 -f v1.1.1
\tIf STOR-1111 is an Epic, change fixVersion of tickets in the Epic to v1.1.1
\t\tjira_cmd.py -i STOR-1111 -et -f v1.1.1
    \tPrint all Epics for the current release
    \t\tjira_cmd.py -ce -p
    \tPrint all tickets that belong to Epics for the current release
\t\tjira_cmd.py -ce -et -p
""")
parser = argparse.ArgumentParser(epilog=helpEpilog,
description=helpDescription,
formatter_class=argparse.RawDescriptionHelpFormatter)
loginGroup = parser.add_argument_group(title="Jira login options")
loginArgs = loginGroup.add_mutually_exclusive_group()
loginArgs.add_argument("--netrc", default="~/.netrc", help="netrc file")
loginArgs.add_argument("-up", "--user-password", required=False,
help="Username and password in the format of user:pass")
loginArgs.add_argument("--jira-access-token", default=os.environ.get("JIRA_ACCESS_TOKEN"), required=False,
help="PAT (personal access token) for accessing Jira")
selectorsGroup = parser.add_argument_group(title="Issues selection")
selectors = selectorsGroup.add_mutually_exclusive_group(required=True)
selectors.add_argument("-s", "--search-query", required=False, help="Search query to use")
selectors.add_argument("-i", "--issue", required=False, help="Issue key")
selectors.add_argument("-bz", "--bz-issue", required=False, help="BZ issue key")
selectors.add_argument("-crepics", "--current-release-epics-filter", action=buildEpicFilterAction, dest="search_query",
help="Search for Epics matching the given named filter")
selectors.add_argument("-nf", "--named-filter", action=buildEpicFilterAction, dest="search_query",
help="Search for Epics matching the given named filter")
selectors.add_argument("-cre", "--current-release-epics", action='store_const',
dest="search_query", const='filter in ("AI sprint planning current epics")',
help="Search for current release epics")
selectors.add_argument("-eff", "--epics-for-fixup", action='store_const',
dest="search_query", const='filter = "AI epics for fixup"',
help="Search for epics for fixup operation")
selectors.add_argument("-tt", "--triaging-tickets", action='store_const', dest="search_query",
const='project = AITRIAGE AND component = "Cloud-Triage" ORDER BY key DESC',
help="Search for Assisted Installer triaging tickets")
selectors.add_argument("-rtt", "--recent-triaging-tickets", action='store_const',
dest="search_query",
const='project = AITRIAGE AND component = "Cloud-Triage" AND created > -7d',
help="Search for Assisted Installer triaging tickets")
selectors.add_argument("-ce", "--current-version-epics", action='store_const',
dest="search_query", const=SEARCH_QUERY_EPICS + CURRENT_VERSION_FILTER,
help="Search for Epics planned for current version")
selectors.add_argument("-ne", "--next-version-epics", action="store_const",
dest="search_query", const=SEARCH_QUERY_EPICS + NEXT_VERSION_FILTER, help="Search for Epics planned for next version")
parser.add_argument("-li", "--linked-issues", action="store_true", help="Output the issues linked to selected issues")
parser.add_argument("-m", "--max-results", default=MAX_RESULTS, help="Maximum results to return for search query")
parser.add_argument("-v", "--verbose", action="store_true", help="Output verbose logging")
parser.add_argument("-d", "--dry-run", action="store_true", help="Do not update tickets")
parser.add_argument("-et", "--epic-tasks", action="store_true",
help="Operate on tickets that belong to epics in selection, "
+ "instead of the selected tickets themselves")
parser.add_argument("-is", "--include-status", action='append', choices=VALID_STATUS,
help="filter issues based on supplied statuses when printing the issues details")
parser.add_argument("-pf", "--print-field", action='append', choices=VALID_PRINT_FIELDS,
help="Add provided fields to the output")
opsGroup = parser.add_argument_group(title="Operations to perform on selected issues")
ops = opsGroup.add_mutually_exclusive_group(required=True)
ops.add_argument("-p", "--print-report", action="store_true", help="Print issues details")
ops.add_argument("-pr", "--print-raw", action="store_true", help="Print raw issues details")
ops.add_argument("-pc", "--print-csv-report", action="store_true", help="Print issues details")
ops.add_argument("-pj", "--print-json", action="store_true", help="Print issues details")
ops.add_argument("-pmd", "--print-markdown-report", action="store_true", help="Print issues details")
ops.add_argument("-al", "--link-to", default=None, help="Link tickets from search result to provided ticket")
ops.add_argument("-rl", "--remove-link", default=None, help="Remove the provided ticket tickets in search results")
ops.add_argument("-sp", "--sprint", default=None, type=int,
help="Assigne the tickets in search results to the provided sprint")
ops.add_argument("-aw", "--add-watchers", default=None, nargs="+", help="Add the watcher to the selected tickets")
ops.add_argument("-rw", "--remove-watchers", default=None, nargs="+", help="Remove the watcher from the selected tickets")
ops.add_argument("-ac", "--add-component", default=None, help="Add the component to the selected tickets")
ops.add_argument("-rc", "--remove-component", default=None, help="Remove the component from the selected tickets")
ops.add_argument("-ala", "--add-labels", default=None, nargs="+", help="Add the label to the selected tickets")
ops.add_argument("-rla", "--remove-labels", nargs="+", default=None, help="Remove the label from the selected tickets")
ops.add_argument("--remove-comment", help="Remove comments that have the provided text")
ops.add_argument("-f", "--fix-version", help="Set the fixVersion of selected tickets")
ops.add_argument("-uc", "--update-contributors", action="store_true", help="Add assignee to contributors")
ops.add_argument("-ef", "--epic-fixup", action="store_true", help=textwrap.dedent("""
    Operate on epics. Will perform some common epic-related fixups such as:
    add the assignee to the contributor field,
    add the epic's Team component to all tasks.
    If the epic has a project-related label, add it to all tasks.
"""))
return parser
if __name__ == "__main__":
cmdline_args = build_parser().parse_args()
if cmdline_args.print_json:
logging.getLogger("__main__").setLevel(logging.WARN)
if cmdline_args.verbose:
isVerbose = True
logging.getLogger("__main__").setLevel(logging.DEBUG)
if cmdline_args.dry_run:
isDryRun = True
print(main(cmdline_args))
|
py | 7df8e57c2ca213ebb1e8c54768571b83ff9f7cc8 | import os, json
print("Content-type:text/html\r\n\r\n")
print("")
print("<title>Test CGI</title>")
print("<p>Hello World!</p>")
print(os.environ)
for param in os.environ.keys():
#print(param)
if(param=="QUERY_STRING"):
print(param)
for param in os.environ.keys():
#print(param)
if(param=="HTTP_USER_AGENT"):
print(param) |
py | 7df8e5b03e98819bc02cd5f6d752ee107950db4a | # -*- coding: utf-8 -*-
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['voltaware_collector'],
package_dir={'': 'src'})
setup(**setup_args)
|
py | 7df8e695e58e0889e57e04b0c01a8e9fb8496e29 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1StatefulSetUpdateStrategy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'rolling_update': 'V1RollingUpdateStatefulSetStrategy',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None):
"""
V1StatefulSetUpdateStrategy - a model defined in Swagger
"""
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
"""
Gets the rolling_update of this V1StatefulSetUpdateStrategy.
RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
:return: The rolling_update of this V1StatefulSetUpdateStrategy.
:rtype: V1RollingUpdateStatefulSetStrategy
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""
Sets the rolling_update of this V1StatefulSetUpdateStrategy.
RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
:param rolling_update: The rolling_update of this V1StatefulSetUpdateStrategy.
:type: V1RollingUpdateStatefulSetStrategy
"""
self._rolling_update = rolling_update
@property
def type(self):
"""
Gets the type of this V1StatefulSetUpdateStrategy.
Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.
:return: The type of this V1StatefulSetUpdateStrategy.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1StatefulSetUpdateStrategy.
Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.
:param type: The type of this V1StatefulSetUpdateStrategy.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1StatefulSetUpdateStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
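# Minimal illustrative usage (added; not part of the generated client). The printed dict is
# approximate; populating rolling_update would require the companion
# V1RollingUpdateStatefulSetStrategy model from the same package.
if __name__ == "__main__":
    example_strategy = V1StatefulSetUpdateStrategy(type="RollingUpdate")
    # roughly: {'rolling_update': None, 'type': 'RollingUpdate'}
    print(example_strategy.to_dict())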
|
py | 7df8e6db7cfb19af695ee5756e7c4176dd93dc7f | from fltk import *
import copy
import numpy as np
import math
import sys
if '../..' not in sys.path:
sys.path.append('../..')
from PyCommon.modules.Math import mmMath as mm
from PyCommon.modules.Resource import ysMotionLoader as yf
from PyCommon.modules.Renderer import ysRenderer as yr
from PyCommon.modules.Simulator import csVpWorld as cvw
from PyCommon.modules.Simulator import csVpModel as cvm
# from PyCommon.modules.GUI import ysSimpleViewer as ysv
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Optimization import ysAnalyticConstrainedOpt as yac
from PyCommon.modules.Util import ysPythonEx as ype
from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp
from PyCommon.modules.ArticulatedBody import ysMomentum as ymt
from PyCommon.modules.ArticulatedBody import ysControl as yct
from PyCommon.modules.ArticulatedBody import hpInvKineDart as hik
from MomentumProject.foot_example_segfoot_constraint import mtOptimize as mot
from MomentumProject.foot_example_segfoot_constraint import mtInitialize as mit
from MomentumProject.foot_example_segfoot_constraint.foot_window import FootWindow
from PyCommon.modules.ArticulatedBody import hpFootIK as hfi
# from scipy.spatial import Delaunay
# import pydart2 as pydart
# from PyCommon.modules.Simulator import csDartModel as cdm
# from OpenGL.GL import *
# from OpenGL.GLUT import *
g_initFlag = 0
forceShowTime = 0
JsysPre = 0
JsupPreL = 0
JsupPreR = 0
JconstPre = 0
contactChangeCount = 0
contactChangeType = 0
contact = 0
maxContactChangeCount = 30
preFootCenter = [None]
DART_CONTACT_ON = False
SKELETON_ON = True
def main():
# np.set_printoptions(precision=4, linewidth=200)
np.set_printoptions(precision=5, threshold=np.inf, suppress=True, linewidth=3000)
motionFile = 'wd2_tiptoe.bvh'
motionFile = 'wd2_tiptoe_zygote.bvh'
# motion, mcfg, wcfg, stepsPerFrame, config, frame_rate = mit.create_biped(motionFile, SEGMENT_FOOT_RAD=0.008)
motion, mcfg, wcfg, stepsPerFrame, config, frame_rate = mit.create_biped(motionFile, SEGMENT_FOOT_MAG=0.01, SEGMENT_FOOT_RAD=0.008)
# motion, mcfg, wcfg, stepsPerFrame, config, frame_rate = mit.create_biped()
# motion, mcfg, wcfg, stepsPerFrame, config = mit.create_jump_biped()
vpWorld = cvw.VpWorld(wcfg)
vpWorld.SetGlobalDamping(0.999)
motionModel = cvm.VpMotionModel(vpWorld, motion[0], mcfg)
controlModel = cvm.VpControlModel(vpWorld, motion[0], mcfg)
# controlModel_shadow_for_ik = cvm.VpControlModel(vpWorld, motion[0], mcfg)
vpWorld.initialize()
controlModel.initializeHybridDynamics()
# controlToMotionOffset = (1.5, -0.02, 0)
controlToMotionOffset = (1.5, 0., 0)
controlModel.translateByOffset(controlToMotionOffset)
# controlModel_shadow_for_ik.set_q(controlModel.get_q())
# controlModel_shadow_for_ik.computeJacobian(0, np.array([0., 0., 0.]))
wcfg_ik = copy.deepcopy(wcfg)
vpWorld_ik = cvw.VpWorld(wcfg_ik)
controlModel_ik = cvm.VpControlModel(vpWorld_ik, motion[0], mcfg)
vpWorld_ik.initialize()
controlModel_ik.set_q(np.zeros_like(controlModel.get_q()))
totalDOF = controlModel.getTotalDOF()
DOFs = controlModel.getDOFs()
foot_dofs = []
left_foot_dofs = []
right_foot_dofs = []
foot_seg_dofs = []
left_foot_seg_dofs = []
right_foot_seg_dofs = []
# for joint_idx in range(motion[0].skeleton.getJointNum()):
for joint_idx in range(controlModel.getJointNum()):
joint_name = controlModel.index2name(joint_idx)
# joint_name = motion[0].skeleton.getJointName(joint_idx)
if 'Foot' in joint_name:
foot_dofs_temp = controlModel.getJointDOFIndexes(joint_idx)
foot_dofs.extend(foot_dofs_temp)
if 'Left' in joint_name:
left_foot_dofs.extend(foot_dofs_temp)
elif 'Right' in joint_name:
right_foot_dofs.extend(foot_dofs_temp)
if 'foot' in joint_name:
foot_dofs_temp = controlModel.getJointDOFIndexes(joint_idx)
foot_seg_dofs.extend(foot_dofs_temp)
if 'Left' in joint_name:
left_foot_seg_dofs.extend(foot_dofs_temp)
elif 'Right' in joint_name:
right_foot_seg_dofs.extend(foot_dofs_temp)
# parameter
Kt = config['Kt']; Dt = config['Dt'] # tracking gain
Kl = config['Kl']; Dl = config['Dl'] # linear balance gain
Kh = config['Kh']; Dh = config['Dh'] # angular balance gain
Ks = config['Ks']; Ds = config['Ds'] # penalty force spring gain
Bt = config['Bt']
Bl = config['Bl']
Bh = config['Bh']
selectedBody = motion[0].skeleton.getJointIndex(config['end'])
constBody = motion[0].skeleton.getJointIndex('RightFoot')
supL = motion[0].skeleton.getJointIndex('LeftFoot')
supR = motion[0].skeleton.getJointIndex('RightFoot')
# momentum matrix
linkMasses = controlModel.getBodyMasses()
totalMass = controlModel.getTotalMass()
TO = ymt.make_TO(linkMasses)
dTO = ymt.make_dTO(len(linkMasses))
# optimization
problem = yac.LSE(totalDOF, 12)
# a_sup = (0,0,0, 0,0,0) #ori
# a_sup = (0,0,0, 0,0,0) #L
CP_old = [mm.v3(0., 0., 0.)]
# penalty method
bodyIDsToCheck = list(range(vpWorld.getBodyNum()))
# mus = [1.]*len(bodyIDsToCheck)
mus = [.5]*len(bodyIDsToCheck)
# flat data structure
ddth_des_flat = ype.makeFlatList(totalDOF)
dth_flat = ype.makeFlatList(totalDOF)
ddth_sol = ype.makeNestedList(DOFs)
# viewer
rd_footCenter = [None]
rd_footCenter_ref = [None]
rd_footCenterL = [None]
rd_footCenterR = [None]
rd_CM_plane = [None]
rd_CM = [None]
rd_CP = [None]
rd_CP_des = [None]
rd_dL_des_plane = [None]
rd_dH_des = [None]
rd_grf_des = [None]
rd_exf_des = [None]
rd_exfen_des = [None]
rd_root_des = [None]
rd_foot_ori = [None]
rd_foot_pos = [None]
rd_root_ori = [None]
rd_root_pos = [None]
rd_CF = [None]
rd_CF_pos = [None]
rootPos = [None]
selectedBodyId = [selectedBody]
extraForce = [None]
extraForcePos = [None]
rightFootVectorX = [None]
rightFootVectorY = [None]
rightFootVectorZ = [None]
rightFootPos = [None]
rightVectorX = [None]
rightVectorY = [None]
rightVectorZ = [None]
rightPos = [None]
def makeEmptyBasicSkeletonTransformDict(init=None):
Ts = dict()
Ts['pelvis'] = init
Ts['spine_ribs'] = init
Ts['head'] = init
Ts['thigh_R'] = init
Ts['shin_R'] = init
Ts['foot_heel_R'] = init
Ts['foot_R'] = init
Ts['heel_R'] = init
Ts['outside_metatarsal_R'] = init
Ts['outside_phalanges_R'] = init
Ts['inside_metatarsal_R'] = init
Ts['inside_phalanges_R'] = init
Ts['upper_limb_R'] = init
Ts['lower_limb_R'] = init
Ts['thigh_L'] = init
Ts['shin_L'] = init
Ts['foot_heel_L'] = init
Ts['foot_L'] = init
Ts['heel_L'] = init
Ts['outside_metatarsal_L'] = init
Ts['outside_phalanges_L'] = init
Ts['inside_metatarsal_L'] = init
Ts['inside_phalanges_L'] = init
Ts['upper_limb_L'] = init
Ts['lower_limb_L'] = init
return Ts
# viewer = ysv.SimpleViewer()
# viewer = hsv.hpSimpleViewer(rect=[0, 0, 1024, 768], viewForceWnd=False)
viewer = hsv.hpSimpleViewer(rect=[0, 0, 960+300, 1+1080+55], viewForceWnd=False)
# viewer.record(False)
# viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,255,255), yr.LINK_BONE))
viewer.doc.addObject('motion', motion)
viewer.doc.addRenderer('motionModel', yr.VpModelRenderer(motionModel, (150,150,255), yr.POLYGON_FILL))
viewer.doc.setRendererVisible('motionModel', False)
viewer.doc.addRenderer('ikModel', yr.VpModelRenderer(controlModel_ik, (150,150,255), yr.POLYGON_LINE))
viewer.doc.setRendererVisible('ikModel', False)
# viewer.doc.addRenderer('controlModel', cvr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_LINE))
control_model_renderer = yr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_FILL)
viewer.doc.addRenderer('controlModel', control_model_renderer)
skeleton_renderer = None
if SKELETON_ON:
# skeleton_renderer = yr.BasicSkeletonRenderer(makeEmptyBasicSkeletonTransformDict(np.eye(4)), offset_Y=-0.08)
# skeleton_renderer = yr.BasicSkeletonRenderer(makeEmptyBasicSkeletonTransformDict(np.eye(4)), color=(230, 230, 230), offset_draw=(0.8, -0.02, 0.))
skeleton_renderer = yr.BasicSkeletonRenderer(makeEmptyBasicSkeletonTransformDict(np.eye(4)), color=(230, 230, 230), offset_draw=(0., -0.0, 0.))
viewer.doc.addRenderer('skeleton', skeleton_renderer)
viewer.doc.addRenderer('rd_footCenter', yr.PointsRenderer(rd_footCenter))
viewer.doc.setRendererVisible('rd_footCenter', False)
viewer.doc.addRenderer('rd_footCenter_ref', yr.PointsRenderer(rd_footCenter_ref))
viewer.doc.setRendererVisible('rd_footCenter_ref', False)
viewer.doc.addRenderer('rd_CM_plane', yr.PointsRenderer(rd_CM_plane, (255,255,0)))
viewer.doc.setRendererVisible('rd_CM_plane', False)
viewer.doc.addRenderer('rd_CP', yr.PointsRenderer(rd_CP, (0,255,0)))
viewer.doc.setRendererVisible('rd_CP', False)
viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (255,0,255)))
viewer.doc.setRendererVisible('rd_CP_des', False)
viewer.doc.addRenderer('rd_dL_des_plane', yr.VectorsRenderer(rd_dL_des_plane, rd_CM, (255,255,0)))
viewer.doc.setRendererVisible('rd_dL_des_plane', False)
viewer.doc.addRenderer('rd_dH_des', yr.VectorsRenderer(rd_dH_des, rd_CM, (0,255,0)))
viewer.doc.setRendererVisible('rd_dH_des', False)
# viewer.doc.addRenderer('rd_grf_des', yr.ForcesRenderer(rd_grf_des, rd_CP_des, (0,255,0), .001))
viewer.doc.addRenderer('rd_CF', yr.VectorsRenderer(rd_CF, rd_CF_pos, (255,255,0)))
viewer.doc.setRendererVisible('rd_CF', False)
viewer.doc.addRenderer('rd_foot_ori', yr.OrientationsRenderer(rd_foot_ori, rd_foot_pos, (255,255,0)))
viewer.doc.setRendererVisible('rd_foot_ori', False)
viewer.doc.addRenderer('rd_root_ori', yr.OrientationsRenderer(rd_root_ori, rd_root_pos, (255,255,0)))
viewer.doc.setRendererVisible('rd_root_ori', False)
viewer.doc.addRenderer('extraForce', yr.VectorsRenderer(rd_exf_des, extraForcePos, (0,255,0)))
viewer.doc.setRendererVisible('extraForce', False)
viewer.doc.addRenderer('extraForceEnable', yr.VectorsRenderer(rd_exfen_des, extraForcePos, (255,0,0)))
# viewer.doc.addRenderer('right_foot_oriX', yr.VectorsRenderer(rightFootVectorX, rightFootPos, (255,0,0)))
# viewer.doc.addRenderer('right_foot_oriY', yr.VectorsRenderer(rightFootVectorY, rightFootPos, (0,255,0)))
# viewer.doc.addRenderer('right_foot_oriZ', yr.VectorsRenderer(rightFootVectorZ, rightFootPos, (0,0,255)))
# viewer.doc.addRenderer('right_oriX', yr.VectorsRenderer(rightVectorX, rightPos, (255,0,0)))
# viewer.doc.addRenderer('right_oriY', yr.VectorsRenderer(rightVectorY, rightPos, (0,255,0)))
# viewer.doc.addRenderer('right_oriZ', yr.VectorsRenderer(rightVectorZ, rightPos, (0,0,255)))
# foot_viewer = FootWindow(viewer.x() + viewer.w() + 20, viewer.y(), 300, 400, 'foot contact modifier', controlModel)
foot_viewer = None # type: FootWindow
initKt = 25.
# initKt = 60.
initKl = 100.
initKh = 100.
initBl = .1
initBh = .13
# initSupKt = 17
initSupKt = 22
initFm = 50.0
initComX = 0.
initComY = 0.
initComZ = 0.
viewer.objectInfoWnd.add1DSlider("Kt", 0., 300., 1., initKt)
viewer.objectInfoWnd.add1DSlider("Kl", 0., 300., 1., initKl)
viewer.objectInfoWnd.add1DSlider("Kh", 0., 300., 1., initKh)
viewer.objectInfoWnd.add1DSlider("Bl", 0., 1., .001, initBl)
viewer.objectInfoWnd.add1DSlider("Bh", 0., 1., .001, initBh)
viewer.objectInfoWnd.add1DSlider("SupKt", 0., 300., 0.1, initSupKt)
viewer.objectInfoWnd.add1DSlider("Fm", 0., 1000., 10., initFm)
viewer.objectInfoWnd.add1DSlider("com X offset", -1., 1., 0.01, initComX)
viewer.objectInfoWnd.add1DSlider("com Y offset", -1., 1., 0.01, initComY)
viewer.objectInfoWnd.add1DSlider("com Z offset", -1., 1., 0.01, initComZ)
viewer.objectInfoWnd.add1DSlider("tiptoe angle", -0.5, .5, 0.001, 0.)
viewer.objectInfoWnd.add1DSlider("left tilt angle", -0.5, .5, 0.001, 0.)
viewer.objectInfoWnd.add1DSlider("right tilt angle", -0.5, .5, 0.001, 0.)
viewer.force_on = False
def viewer_SetForceState(object):
viewer.force_on = True
def viewer_GetForceState():
return viewer.force_on
def viewer_ResetForceState():
viewer.force_on = False
viewer.objectInfoWnd.addBtn('Force on', viewer_SetForceState)
viewer_ResetForceState()
offset = 60
viewer.objectInfoWnd.begin()
viewer.objectInfoWnd.labelForceX = Fl_Value_Input(20, 30+offset*9, 40, 20, 'X')
viewer.objectInfoWnd.labelForceX.value(0)
viewer.objectInfoWnd.labelForceY = Fl_Value_Input(80, 30+offset*9, 40, 20, 'Y')
viewer.objectInfoWnd.labelForceY.value(0)
viewer.objectInfoWnd.labelForceZ = Fl_Value_Input(140, 30+offset*9, 40, 20, 'Z')
viewer.objectInfoWnd.labelForceZ.value(1)
viewer.objectInfoWnd.labelForceDur = Fl_Value_Input(220, 30+offset*9, 40, 20, 'Dur')
viewer.objectInfoWnd.labelForceDur.value(0.1)
viewer.objectInfoWnd.end()
# self.sliderFm = Fl_Hor_Nice_Slider(10, 42+offset*6, 250, 10)
def getParamVal(paramname):
return viewer.objectInfoWnd.getVal(paramname)
def getParamVals(paramnames):
return (getParamVal(name) for name in paramnames)
def setParamVal(paramname, val):
viewer.objectInfoWnd.setVal(paramname, val)
idDic = dict()
for i in range(motion[0].skeleton.getJointNum()):
idDic[motion[0].skeleton.getJointName(i)] = i
# extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0']
extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0']
lIDdic = {'Left'+name: motion[0].skeleton.getJointIndex('Left'+name) for name in extendedFootName}
rIDdic = {'Right'+name: motion[0].skeleton.getJointIndex('Right'+name) for name in extendedFootName}
footIdDic = lIDdic.copy()
footIdDic.update(rIDdic)
lIDlist = [motion[0].skeleton.getJointIndex('Left'+name) for name in extendedFootName]
rIDlist = [motion[0].skeleton.getJointIndex('Right'+name) for name in extendedFootName]
footIdlist = []
footIdlist.extend(lIDlist)
footIdlist.extend(rIDlist)
foot_left_idx = motion[0].skeleton.getJointIndex('LeftFoot')
foot_right_idx = motion[0].skeleton.getJointIndex('RightFoot')
foot_left_idx_temp = motion[0].skeleton.getJointIndex('LeftFoot_foot_1_0')
foot_right_idx_temp = motion[0].skeleton.getJointIndex('RightFoot_foot_1_0')
# ik_solver = hik.numIkSolver(dartIkModel)
# ik_solver.clear()
# bodyIDsToCheck = rIDlist.copy()
joint_names = [motion[0].skeleton.getJointName(i) for i in range(motion[0].skeleton.getJointNum())]
def fix_dofs(_DOFs, nested_dof_values, _mcfg, _joint_names):
fixed_nested_dof_values = list()
fixed_nested_dof_values.append(nested_dof_values[0])
for i in range(1, len(_DOFs)):
dof = _DOFs[i]
if dof == 1:
node = _mcfg.getNode(_joint_names[i])
axis = mm.unitZ()
if node.jointAxes[0] == 'X':
axis = mm.unitX()
elif node.jointAxes[0] == 'Y':
axis = mm.unitY()
fixed_nested_dof_values.append(np.array([np.dot(nested_dof_values[i], axis)]))
else:
fixed_nested_dof_values.append(nested_dof_values[i])
return fixed_nested_dof_values
start_frame = 100
up_vec_in_each_link = dict()
for foot_id in footIdlist:
up_vec_in_each_link[foot_id] = controlModel_ik.getBodyOrientationGlobal(foot_id)[1, :]
controlModel_ik.set_q(controlModel.get_q())
###################################
# simulate
###################################
def simulateCallback(frame):
# print(frame)
# print(motion[frame].getJointOrientationLocal(footIdDic['RightFoot_foot_0_1_0']))
if True:
if frame == 200:
if motionFile == 'wd2_tiptoe.bvh':
setParamVal('tiptoe angle', 0.3)
if motionFile == 'wd2_tiptoe_zygote.bvh':
setParamVal('tiptoe angle', 0.3)
# elif 210 < frame < 240:
# if motionFile == 'wd2_tiptoe_zygote.bvh':
# setParamVal('com Y offset', 0.01/30. * (frame-110))
elif frame == 400:
setParamVal('com Y offset', 0.)
setParamVal('tiptoe angle', 0.)
elif frame == 430:
foot_viewer.check_all_seg()
# setParamVal('SupKt', 30.)
# elif frame == 400:
# setParamVal('SupKt', 17.)
# hfi.footAdjust(motion[frame], idDic, SEGMENT_FOOT_MAG=.03, SEGMENT_FOOT_RAD=.015, baseHeight=0.02)
if abs(getParamVal('tiptoe angle')) > 0.001:
tiptoe_angle = getParamVal('tiptoe angle')
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_0_0'],
mm.exp(mm.unitX(), -math.pi * tiptoe_angle))
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1_0'],
mm.exp(mm.unitX(), -math.pi * tiptoe_angle))
motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_0_0'],
mm.exp(mm.unitX(), -math.pi * tiptoe_angle))
motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1_0'],
mm.exp(mm.unitX(), -math.pi * tiptoe_angle))
# motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle * 0.95))
# motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle * 0.95))
motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle))
motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle))
if getParamVal('left tilt angle') > 0.001:
left_tilt_angle = getParamVal('left tilt angle')
if motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1') is not None:
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1'], mm.exp(mm.unitZ(), -math.pi * left_tilt_angle))
else:
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1_0'], mm.exp(mm.unitZ(), -math.pi * left_tilt_angle))
motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))
elif getParamVal('left tilt angle') < -0.001:
left_tilt_angle = getParamVal('left tilt angle')
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_0'], mm.exp(mm.unitZ(), -math.pi * left_tilt_angle))
if motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1') is not None:
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))
else:
motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1_0'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))
motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))
if getParamVal('right tilt angle') > 0.001:
right_tilt_angle = getParamVal('right tilt angle')
if motion[0].skeleton.getJointIndex('RightFoot_foot_0_1') is not None:
motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1'], mm.exp(mm.unitZ(), math.pi * right_tilt_angle))
else:
motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1_0'], mm.exp(mm.unitZ(), math.pi * right_tilt_angle))
motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))
elif getParamVal('right tilt angle') < -0.001:
right_tilt_angle = getParamVal('right tilt angle')
motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_0'], mm.exp(mm.unitZ(), math.pi * right_tilt_angle))
if motion[0].skeleton.getJointIndex('RightFoot_foot_0_1') is not None:
motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))
# else:
# motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1_0'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))
motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))
motionModel.update(motion[frame])
motionModel.translateByOffset(np.array([getParamVal('com X offset'), getParamVal('com Y offset'), getParamVal('com Z offset')]))
controlModel_ik.set_q(controlModel.get_q())
global g_initFlag
global forceShowTime
global JsysPre
global JsupPreL
global JsupPreR
global JconstPre
global preFootCenter
global maxContactChangeCount
global contactChangeCount
global contact
global contactChangeType
Kt, Kl, Kh, Bl, Bh, kt_sup = getParamVals(['Kt', 'Kl', 'Kh', 'Bl', 'Bh', 'SupKt'])
Dt = 2*(Kt**.5)
Dl = 2*(Kl**.5)
Dh = 2*(Kh**.5)
dt_sup = 2*(kt_sup**.5)
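        # (added comment) the damping gains are set to 2*sqrt(K), the critical-damping
        # coefficient of a unit-mass spring-damper, so each PD term below is critically damped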
# tracking
th_r = motion.getDOFPositions(frame)
th = controlModel.getDOFPositions()
dth_r = motion.getDOFVelocities(frame)
dth = controlModel.getDOFVelocities()
ddth_r = motion.getDOFAccelerations(frame)
ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt)
# ype.flatten(fix_dofs(DOFs, ddth_des, mcfg, joint_names), ddth_des_flat)
# ype.flatten(fix_dofs(DOFs, dth, mcfg, joint_names), dth_flat)
ype.flatten(ddth_des, ddth_des_flat)
ype.flatten(dth, dth_flat)
#################################################
# jacobian
#################################################
contact_des_ids = list() # desired contact segments
if foot_viewer.check_om_l.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_0'))
if foot_viewer.check_op_l.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_0_0'))
if foot_viewer.check_im_l is not None and foot_viewer.check_im_l.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1'))
if foot_viewer.check_ip_l.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1_0'))
if foot_viewer.check_h_l.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_1_0'))
if foot_viewer.check_om_r.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_0'))
if foot_viewer.check_op_r.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_0_0'))
if foot_viewer.check_im_r is not None and foot_viewer.check_im_r.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_1'))
if foot_viewer.check_ip_r.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_1_0'))
if foot_viewer.check_h_r.value():
contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_1_0'))
contact_ids = list() # temp idx for balancing
contact_ids.extend(contact_des_ids)
contact_joint_ori = list(map(controlModel.getJointOrientationGlobal, contact_ids))
contact_joint_pos = list(map(controlModel.getJointPositionGlobal, contact_ids))
contact_body_ori = list(map(controlModel.getBodyOrientationGlobal, contact_ids))
contact_body_pos = list(map(controlModel.getBodyPositionGlobal, contact_ids))
contact_body_vel = list(map(controlModel.getBodyVelocityGlobal, contact_ids))
contact_body_angvel = list(map(controlModel.getBodyAngVelocityGlobal, contact_ids))
ref_joint_ori = list(map(motion[frame].getJointOrientationGlobal, contact_ids))
ref_joint_pos = list(map(motion[frame].getJointPositionGlobal, contact_ids))
ref_joint_vel = [motion.getJointVelocityGlobal(joint_idx, frame) for joint_idx in contact_ids]
ref_joint_angvel = [motion.getJointAngVelocityGlobal(joint_idx, frame) for joint_idx in contact_ids]
ref_body_ori = list(map(motionModel.getBodyOrientationGlobal, contact_ids))
ref_body_pos = list(map(motionModel.getBodyPositionGlobal, contact_ids))
# ref_body_vel = list(map(controlModel.getBodyVelocityGlobal, contact_ids))
ref_body_angvel = [motion.getJointAngVelocityGlobal(joint_idx, frame) for joint_idx in contact_ids]
ref_body_vel = [ref_joint_vel[i] + np.cross(ref_joint_angvel[i], ref_body_pos[i] - ref_joint_pos[i])
for i in range(len(ref_joint_vel))]
is_contact = [1] * len(contact_ids)
contact_right = len(set(contact_des_ids).intersection(rIDlist)) > 0
contact_left = len(set(contact_des_ids).intersection(lIDlist)) > 0
contMotionOffset = th[0][0] - th_r[0][0]
linkPositions = controlModel.getBodyPositionsGlobal()
linkVelocities = controlModel.getBodyVelocitiesGlobal()
linkAngVelocities = controlModel.getBodyAngVelocitiesGlobal()
linkInertias = controlModel.getBodyInertiasGlobal()
CM = yrp.getCM(linkPositions, linkMasses, totalMass)
dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)
CM_plane = copy.copy(CM)
CM_plane[1] = 0.
dCM_plane = copy.copy(dCM)
dCM_plane[1] = 0.
P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, linkInertias)
dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, linkAngVelocities, linkInertias)
# calculate jacobian
Jsys, dJsys = controlModel.computeCom_J_dJdq()
J_contacts = [] # type: list[np.ndarray]
dJ_contacts = [] # type: list[np.ndarray]
for contact_id in contact_ids:
J_contacts.append(Jsys[6*contact_id:6*contact_id + 6, :])
dJ_contacts.append(dJsys[6*contact_id:6*contact_id + 6])
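        # (added comment) Jsys stacks one 6-row block per body (presumably 3 linear + 3
        # angular rows), so each contact body's Jacobian/dJdq is just the matching slice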
# calculate footCenter
footCenter = sum(contact_body_pos) / len(contact_body_pos) if len(contact_body_pos) > 0 \
else .5 * (controlModel.getBodyPositionGlobal(supL) + controlModel.getBodyPositionGlobal(supR))
footCenter[1] = 0.
# if len(contact_body_pos) > 2:
# hull = ConvexHull(contact_body_pos)
footCenter_ref = sum(ref_body_pos) / len(ref_body_pos) if len(ref_body_pos) > 0 \
else .5 * (motionModel.getBodyPositionGlobal(supL) + motionModel.getBodyPositionGlobal(supR))
footCenter_ref = footCenter_ref + contMotionOffset
# if len(ref_body_pos) > 2:
# hull = ConvexHull(ref_body_pos)
footCenter_ref[1] = 0.
# footCenter[0] = footCenter[0] + getParamVal('com X offset')
# footCenter[1] = footCenter[0] + getParamVal('com Y offset')
# footCenter[2] = footCenter[2] + getParamVal('com Z offset')
# initialization
if g_initFlag == 0:
preFootCenter[0] = footCenter.copy()
g_initFlag = 1
# if contactChangeCount == 0 and np.linalg.norm(footCenter - preFootCenter[0]) > 0.01:
# contactChangeCount += 30
if contactChangeCount > 0:
# change footcenter gradually
footCenter = preFootCenter[0] + (maxContactChangeCount - contactChangeCount)*(footCenter-preFootCenter[0])/maxContactChangeCount
else:
preFootCenter[0] = footCenter.copy()
# linear momentum
# TODO:
# We should consider dCM_ref, shouldn't we?
# add getBodyPositionGlobal and getBodyPositionsGlobal in csVpModel!
# to do that, set joint velocities to vpModel
CM_ref_plane = footCenter
# CM_ref_plane = footCenter_ref
CM_ref = footCenter + np.array([getParamVal('com X offset'), motionModel.getCOM()[1] + getParamVal('com Y offset'), getParamVal('com Z offset')])
dL_des_plane = Kl * totalMass * (CM_ref - CM) - Dl * totalMass * dCM
# dL_des_plane = Kl * totalMass * (CM_ref_plane - CM_plane) - Dl * totalMass * dCM_plane
# dL_des_plane[1] = 0.
# print('dCM_plane : ', np.linalg.norm(dCM_plane))
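        # (added comment) spring-damper law on the whole-body CM: the desired rate of change
        # of linear momentum is dL_des = M*(Kl*(CM_ref - CM) - Dl*dCM), pulling the CM toward
        # a reference placed over the support center plus the UI offsets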
# angular momentum
CP_ref = footCenter
# CP_ref = footCenter_ref
bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
CP = yrp.getCP(contactPositions, contactForces)
if CP_old[0] is None or CP is None:
dCP = None
else:
dCP = (CP - CP_old[0])/(1/30.)
CP_old[0] = CP
if CP is not None and dCP is not None:
ddCP_des = Kh*(CP_ref - CP) - Dh * dCP
dCP_des = dCP + ddCP_des * (1/30.)
CP_des = CP + dCP_des * (1/30.)
# CP_des = footCenter
CP_des = CP + dCP*(1/30.) + .5*ddCP_des*((1/30.)**2)
dH_des = np.cross((CP_des - CM), (dL_des_plane + totalMass * mm.s2v(wcfg.gravity)))
if contactChangeCount > 0: # and contactChangeType == 'DtoS':
dH_des *= (maxContactChangeCount - contactChangeCount)/maxContactChangeCount
else:
dH_des = None
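        # (added comment) dH_des = (CP_des - CM) x (dL_des + M*g) is the angular-momentum
        # rate about the CM consistent with realizing dL_des while driving the center of
        # pressure toward CP_des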
# convex hull
contact_pos_2d = np.asarray([np.array([contactPosition[0], contactPosition[2]]) for contactPosition in contactPositions])
p = np.array([CM_plane[0], CM_plane[2]])
# hull = None # type: Delaunay
# if contact_pos_2d.shape[0] > 0:
# hull = Delaunay(contact_pos_2d)
# print(hull.find_simplex(p) >= 0)
# set up equality constraint
# TODO:
# logSO3 is just q'', not acceleration.
# To make a_oris acceleration, q'' -> a will be needed
# body_ddqs = list(map(mm.logSO3, [mm.getSO3FromVectors(np.dot(body_ori, mm.unitY()), mm.unitY()) for body_ori in contact_body_ori]))
# body_ddqs = list(map(mm.logSO3, [np.dot(contact_body_ori[i].T, np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], mm.unitY()), mm.unitY()))) for i in range(len(contact_body_ori))]))
body_ddqs = list(map(mm.logSO3, [np.dot(contact_body_ori[i].T, np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], up_vec_in_each_link[contact_ids[i]]), mm.unitY()))) for i in range(len(contact_body_ori))]))
body_qs = list(map(mm.logSO3, contact_body_ori))
body_angs = [np.dot(contact_body_ori[i], contact_body_angvel[i]) for i in range(len(contact_body_ori))]
body_dqs = [mm.vel2qd(body_angs[i], body_qs[i]) for i in range(len(body_angs))]
a_oris = [np.dot(contact_body_ori[i], mm.qdd2accel(body_ddqs[i], body_dqs[i], body_qs[i])) for i in range(len(contact_body_ori))]
a_oris = list(map(mm.logSO3, [np.dot(np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], up_vec_in_each_link[contact_ids[i]]), mm.unitY())), contact_body_ori[i].T) for i in range(len(contact_body_ori))]))
# body_ddq = body_ddqs[0]
# body_ori = contact_body_ori[0]
# body_ang = np.dot(body_ori.T, contact_body_angvel[0])
#
# body_q = mm.logSO3(body_ori)
# body_dq = mm.vel2qd(body_ang, body_q)
# a_ori = np.dot(body_ori, mm.qdd2accel(body_ddq, body_dq, body_q))
KT_SUP = np.diag([kt_sup/5., kt_sup, kt_sup/5.])
# a_oris = list(map(mm.logSO3, [mm.getSO3FromVectors(np.dot(body_ori, mm.unitY()), mm.unitY()) for body_ori in contact_body_ori]))
# a_sups = [np.append(kt_sup*(ref_body_pos[i] - contact_body_pos[i] + contMotionOffset) + dt_sup*(ref_body_vel[i] - contact_body_vel[i]),
# kt_sup*a_oris[i]+dt_sup*(ref_body_angvel[i]-contact_body_angvel[i])) for i in range(len(a_oris))]
# a_sups = [np.append(kt_sup*(ref_body_pos[i] - contact_body_pos[i] + contMotionOffset) - dt_sup * contact_body_vel[i],
# kt_sup*a_oris[i] - dt_sup * contact_body_angvel[i]) for i in range(len(a_oris))]
a_sups = [np.append(np.dot(KT_SUP, (ref_body_pos[i] - contact_body_pos[i] + contMotionOffset)) - dt_sup * contact_body_vel[i],
kt_sup*a_oris[i] - dt_sup * contact_body_angvel[i]) for i in range(len(a_oris))]
# momentum matrix
RS = np.dot(P, Jsys)
R, S = np.vsplit(RS, 2)
# rs = np.dot((np.dot(dP, Jsys) + np.dot(P, dJsys)), dth_flat)
rs = np.dot(dP, np.dot(Jsys, dth_flat)) + np.dot(P, dJsys)
r_bias, s_bias = np.hsplit(rs, 2)
#######################################################
# optimization
#######################################################
# if contact == 2 and footCenterR[1] > doubleTosingleOffset/2:
if contact_left and not contact_right:
config['weightMap']['RightUpLeg'] = .8
config['weightMap']['RightLeg'] = .8
config['weightMap']['RightFoot'] = .8
else:
config['weightMap']['RightUpLeg'] = .1
config['weightMap']['RightLeg'] = .25
config['weightMap']['RightFoot'] = .2
# if contact == 1 and footCenterL[1] > doubleTosingleOffset/2:
if contact_right and not contact_left:
config['weightMap']['LeftUpLeg'] = .8
config['weightMap']['LeftLeg'] = .8
config['weightMap']['LeftFoot'] = .8
else:
config['weightMap']['LeftUpLeg'] = .1
config['weightMap']['LeftLeg'] = .25
config['weightMap']['LeftFoot'] = .2
w = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])
mot.addTrackingTerms(problem, totalDOF, Bt, w, ddth_des_flat)
if dH_des is not None:
mot.addLinearTerms(problem, totalDOF, Bl, dL_des_plane, R, r_bias)
mot.addAngularTerms(problem, totalDOF, Bh, dH_des, S, s_bias)
if True:
for c_idx in range(len(contact_ids)):
mot.addConstraint2(problem, totalDOF, J_contacts[c_idx], dJ_contacts[c_idx], dth_flat, a_sups[c_idx])
if contactChangeCount > 0:
contactChangeCount = contactChangeCount - 1
if contactChangeCount == 0:
maxContactChangeCount = 30
contactChangeType = 0
r = problem.solve()
problem.clear()
ddth_sol_flat = np.asarray(r['x'])
# ddth_sol_flat[foot_seg_dofs] = np.array(ddth_des_flat)[foot_seg_dofs]
ype.nested(ddth_sol_flat, ddth_sol)
rootPos[0] = controlModel.getBodyPositionGlobal(selectedBody)
localPos = [[0, 0, 0]]
for i in range(stepsPerFrame):
# apply penalty force
bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
# bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = vpWorld.calcManyPenaltyForce(0, bodyIDsToCheck, mus, Ks, Ds)
vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)
controlModel.setDOFAccelerations(ddth_sol)
# controlModel.setDOFAccelerations(ddth_des)
# controlModel.set_ddq(ddth_sol_flat)
# controlModel.set_ddq(ddth_des_flat)
controlModel.solveHybridDynamics()
if forceShowTime > viewer.objectInfoWnd.labelForceDur.value():
forceShowTime = 0
viewer_ResetForceState()
forceforce = np.array([viewer.objectInfoWnd.labelForceX.value(), viewer.objectInfoWnd.labelForceY.value(), viewer.objectInfoWnd.labelForceZ.value()])
extraForce[0] = getParamVal('Fm') * mm.normalize2(forceforce)
if viewer_GetForceState():
forceShowTime += wcfg.timeStep
vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
vpWorld.step()
controlModel_ik.set_q(controlModel.get_q())
# rendering
bodyIDs, geomIDs, positionLocalsForGeom = vpWorld.getContactInfoForcePlate(bodyIDsToCheck)
for foot_seg_id in footIdlist:
control_model_renderer.body_colors[foot_seg_id] = (255, 240, 255)
control_model_renderer.geom_colors[foot_seg_id] = [(255, 240, 255)] * controlModel.getBodyGeomNum(foot_seg_id)
for i in range(len(geomIDs)):
control_model_renderer.geom_colors[bodyIDs[i]][geomIDs[i]] = (255, 0, 0)
# for foot_seg_id in footIdlist:
# control_model_renderer.body_colors[foot_seg_id] = (255, 240, 255)
#
# for contact_id in contact_ids:
# control_model_renderer.body_colors[contact_id] = (255, 0, 0)
rd_footCenter[0] = footCenter
rd_footCenter_ref[0] = footCenter_ref
rd_CM[0] = CM
rd_CM_plane[0] = CM.copy()
rd_CM_plane[0][1] = 0.
if CP is not None and dCP is not None:
rd_CP[0] = CP
rd_CP_des[0] = CP_des
rd_dL_des_plane[0] = [dL_des_plane[0]/100, dL_des_plane[1]/100, dL_des_plane[2]/100]
rd_dH_des[0] = dH_des
rd_grf_des[0] = dL_des_plane - totalMass * mm.s2v(wcfg.gravity)
del rd_foot_ori[:]
del rd_foot_pos[:]
# for seg_foot_id in footIdlist:
# rd_foot_ori.append(controlModel.getJointOrientationGlobal(seg_foot_id))
# rd_foot_pos.append(controlModel.getJointPositionGlobal(seg_foot_id))
rd_foot_ori.append(controlModel.getJointOrientationGlobal(supL))
rd_foot_ori.append(controlModel.getJointOrientationGlobal(supR))
rd_foot_pos.append(controlModel.getJointPositionGlobal(supL))
rd_foot_pos.append(controlModel.getJointPositionGlobal(supR))
rd_root_des[0] = rootPos[0]
rd_root_ori[0] = controlModel.getBodyOrientationGlobal(0)
rd_root_pos[0] = controlModel.getBodyPositionGlobal(0)
del rd_CF[:]
del rd_CF_pos[:]
for i in range(len(contactPositions)):
rd_CF.append(contactForces[i]/400)
rd_CF_pos.append(contactPositions[i].copy())
if viewer_GetForceState():
rd_exfen_des[0] = [extraForce[0][0]/100, extraForce[0][1]/100, extraForce[0][2]/100]
rd_exf_des[0] = [0, 0, 0]
else:
rd_exf_des[0] = [extraForce[0][0]/100, extraForce[0][1]/100, extraForce[0][2]/100]
rd_exfen_des[0] = [0, 0, 0]
extraForcePos[0] = controlModel.getBodyPositionGlobal(selectedBody)
# render contact_ids
# render skeleton
if SKELETON_ON:
Ts = dict()
Ts['pelvis'] = controlModel.getJointTransform(idDic['Hips'])
Ts['thigh_R'] = controlModel.getJointTransform(idDic['RightUpLeg'])
Ts['shin_R'] = controlModel.getJointTransform(idDic['RightLeg'])
Ts['foot_R'] = controlModel.getJointTransform(idDic['RightFoot'])
Ts['foot_heel_R'] = controlModel.getJointTransform(idDic['RightFoot'])
Ts['heel_R'] = np.eye(4)
Ts['outside_metatarsal_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_0'])
Ts['outside_phalanges_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_0_0'])
# Ts['inside_metatarsal_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_1'])
Ts['inside_metatarsal_R'] = np.eye(4)
Ts['inside_phalanges_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_1_0'])
Ts['spine_ribs'] = controlModel.getJointTransform(idDic['Spine'])
Ts['head'] = controlModel.getJointTransform(idDic['Spine1'])
Ts['upper_limb_R'] = controlModel.getJointTransform(idDic['RightArm'])
Ts['lower_limb_R'] = controlModel.getJointTransform(idDic['RightForeArm'])
Ts['thigh_L'] = controlModel.getJointTransform(idDic['LeftUpLeg'])
Ts['shin_L'] = controlModel.getJointTransform(idDic['LeftLeg'])
Ts['foot_L'] = controlModel.getJointTransform(idDic['LeftFoot'])
Ts['foot_heel_L'] = controlModel.getJointTransform(idDic['LeftFoot'])
Ts['heel_L'] = np.eye(4)
Ts['outside_metatarsal_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_0'])
Ts['outside_phalanges_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_0_0'])
# Ts['inside_metatarsal_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_1'])
Ts['inside_metatarsal_L'] = np.eye(4)
Ts['inside_phalanges_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_1_0'])
Ts['upper_limb_L'] = controlModel.getJointTransform(idDic['LeftArm'])
Ts['lower_limb_L'] = controlModel.getJointTransform(idDic['LeftForeArm'])
skeleton_renderer.appendFrameState(Ts)
def postFrameCallback_Always(frame):
if foot_viewer is not None:
foot_viewer.foot_pressure_gl_window.refresh_foot_contact_info(frame, vpWorld, bodyIDsToCheck, mus, Ks, Ds)
foot_viewer.foot_pressure_gl_window.goToFrame(frame)
viewer.setPostFrameCallback_Always(postFrameCallback_Always)
viewer.setSimulateCallback(simulateCallback)
viewer.startTimer(1/30.)
# viewer.play()
viewer.show()
foot_viewer = FootWindow(viewer.x() + viewer.w() + 20, viewer.y(), 300, 500, 'foot contact modifier', controlModel)
foot_viewer.show()
foot_viewer.check_op_l.value(True)
foot_viewer.check_ip_l.value(True)
foot_viewer.check_op_r.value(True)
foot_viewer.check_ip_r.value(True)
viewer.motionViewWnd.goToFrame(0)
Fl.run()
main()
|
py | 7df8e7a47ca3845a8d3dd48f1eafc72dbc103575 | from typing import Tuple
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class Dot(nn.Module):
"""Learn from """
def __init__(self):
super().__init__()
def forward(self, left: torch.Tensor, right: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
compute attention weights and apply it to `right` tensor
Parameters
----------
left: `torch.Tensor` of shape (B, D)
right: `torch.Tensor` of shape (B, L, D)
mask: `torch.Tensor` of shape (B, L), binary value, 0 is for pad
        Returns
        -------
        avg: `torch.Tensor` of shape (B, D), the attention-weighted sum of `right`
        attention_weights: `torch.Tensor` of shape (B, L)
        """
assert left.size(0) == right.size(0) and left.size(-1) == right.size(-1), "Must same dimensions"
assert len(left.size()) == 2 and len(right.size()) == 3
left = left.unsqueeze(1) # (B, 1, D)
tmp = torch.bmm(left, right.permute(0, 2, 1)) # (B, 1, D) * (B, D, L) => (B, 1, L)
tmp = tmp.squeeze(1)
doc_mask = (mask == 0)
out = tmp.masked_fill(doc_mask, -np.inf)
attention_weights = F.softmax(out, dim=1) # (B, L)
avg = right * attention_weights.unsqueeze(-1) # (B, L, D) * (B, L, 1) => (B, L, D)
assert len(avg.size()) == 3
        avg = torch.sum(avg, dim=1)  # sum over the sequence (middle) dimension
return avg, attention_weights
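# --- Illustrative usage sketch (added for exposition; not part of the original module). ---
# It only exercises the Dot module above with random tensors to make the shape contract
# explicit; the batch/length/dimension values below are arbitrary assumptions.
def _demo_dot_attention():
    B, L, D = 2, 5, 8
    left = torch.randn(B, D)                   # one query vector per batch element
    right = torch.randn(B, L, D)               # sequence of candidate vectors
    mask = torch.ones(B, L, dtype=torch.long)  # 1 = real token, 0 = pad
    mask[:, -1] = 0                            # pretend the last position is padding
    avg, weights = Dot()(left, right, mask)
    assert avg.shape == (B, D) and weights.shape == (B, L)
    return avg, weights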
class BiLinear(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.W = nn.Linear(dim, dim)
def forward(self, left: torch.Tensor, right: torch.Tensor, mask: torch.Tensor) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""
compute attention weights and apply it to `right` tensor
Parameters
----------
left: `torch.Tensor` of shape (B, D)
right: `torch.Tensor` of shape (B, L, D)
mask: `torch.Tensor` of shape (B, L), binary value, 0 is for pad
Returns
-------
"""
assert left.size(0) == right.size(0) and left.size(-1) == right.size(-1), "Must same dimensions"
assert len(left.size()) == 2 and len(right.size()) == 3
left = self.W(left) # (B, D)
left = left.unsqueeze(1) # (B, 1, D)
tmp = torch.bmm(left, right.permute(0, 2, 1)) # (B, 1, D) * (B, D, L) => (B, 1, L)
tmp = tmp.squeeze(1)
doc_mask = (mask == 0)
out = tmp.masked_fill(doc_mask, -np.inf)
attention_weights = F.softmax(out, dim=1) # (B, L)
avg = right * attention_weights.unsqueeze(-1) # (B, L, D) * (B, L, 1) => (B, L, D)
        avg = torch.sum(avg, dim=1)  # sum over the sequence (middle) dimension
return avg, attention_weights
class ConcatSelfAtt(nn.Module):
def __init__(self, inp_dim: int, out_dim: int, num_heads: int = 1):
super().__init__()
self.inp_dim = inp_dim
self.out_dim = out_dim
self.num_heads = num_heads
self.linear1 = nn.Linear(inp_dim, out_dim, bias=False)
self.linear2 = nn.Linear(out_dim, num_heads, bias=False)
def forward(self, left: torch.Tensor, right: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
compute attention weights and apply it to `right` tensor
Parameters
----------
left: `torch.Tensor` of shape (B, X) X is not necessarily equal to D
right: `torch.Tensor` of shape (B, L, D)
mask: `torch.Tensor` of shape (B, L), binary value, 0 is for pad
        Returns
        -------
        attended: `torch.Tensor` of shape (B, D, num_heads)
        attention_weights: `torch.Tensor` of shape (B, L, num_heads)
        """
assert left.size(0) == right.size(0), "Must same dimensions"
assert len(left.size()) == 2 and len(right.size()) == 3
assert self.inp_dim == (left.size(-1) + right.size(-1)) # due to concat
B, L, D = right.size()
        left_tmp = left.unsqueeze(1).expand(B, L, -1)  # (B, L, X)
        tsr = torch.cat([left_tmp, right], dim=-1)  # (B, L, X + D)
# start computing multi-head self-attention
tmp = torch.tanh(self.linear1(tsr)) # (B, L, out_dim)
linear_out = self.linear2(tmp) # (B, L, C)
doc_mask = (mask == 0) # (B, L) real tokens will be zeros and pad will have non zero (this is for softmax)
doc_mask = doc_mask.unsqueeze(-1).expand(B, L, self.num_heads) # (B, L, C)
linear_out = linear_out.masked_fill(doc_mask, -np.inf) # I learned from Attention is all you need
# we now can ensure padding tokens will not contribute to softmax
attention_weights = F.softmax(linear_out, dim=1) # (B, L, C)
attended = torch.bmm(right.permute(0, 2, 1), attention_weights) # (B, D, L) * (B, L, C) => (B, D, C)
return attended, attention_weights
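# --- Illustrative usage sketch (added for exposition; not part of the original module). ---
# Shows the shape contract of ConcatSelfAtt: each head produces its own weighted sum of
# `right`, so the attended output is (B, D, num_heads). All sizes below are arbitrary.
def _demo_concat_self_att():
    B, L, X, D, H = 2, 6, 4, 8, 3
    att = ConcatSelfAtt(inp_dim=X + D, out_dim=16, num_heads=H)
    left = torch.randn(B, X)
    right = torch.randn(B, L, D)
    mask = torch.ones(B, L, dtype=torch.long)
    attended, weights = att(left, right, mask)
    assert attended.shape == (B, D, H) and weights.shape == (B, L, H)
    return attended, weights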
class ConcatNotEqualSelfAtt(nn.Module):
def __init__(self, inp_dim: int, out_dim: int, num_heads: int = 1):
super().__init__()
self.inp_dim = inp_dim
self.out_dim = out_dim
self.num_heads = num_heads
self.linear1 = nn.Linear(inp_dim, out_dim, bias=False)
self.linear2 = nn.Linear(out_dim, num_heads, bias=False)
def forward(self, left: torch.Tensor, right: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
compute attention weights and apply it to `right` tensor
Parameters
----------
left: `torch.Tensor` of shape (B, X) X is not necessarily equal to D
right: `torch.Tensor` of shape (B, L, D)
mask: `torch.Tensor` of shape (B, L), binary value, 0 is for pad
Returns
-------
"""
assert left.size(0) == right.size(0), "Must same dimensions"
assert len(left.size()) == 2 and len(right.size()) == 3
assert self.inp_dim == (left.size(-1) + right.size(-1)) # due to concat
B, L, D = right.size()
        left_tmp = left.unsqueeze(1).expand(B, L, -1)  # (B, L, X)
        tsr = torch.cat([left_tmp, right], dim=-1)  # (B, L, X + D)
# start computing multi-head self-attention
tmp = torch.tanh(self.linear1(tsr)) # (B, L, out_dim)
linear_out = self.linear2(tmp) # (B, L, C)
doc_mask = (mask == 0) # (B, L) real tokens will be zeros and pad will have non zero (this is for softmax)
doc_mask = doc_mask.unsqueeze(-1).expand(B, L, self.num_heads) # (B, L, C)
linear_out = linear_out.masked_fill(doc_mask, -np.inf) # I learned from Attention is all you need
# we now can ensure padding tokens will not contribute to softmax
attention_weights = F.softmax(linear_out, dim=1) # (B, L, C)
attended = torch.bmm(right.permute(0, 2, 1), attention_weights) # (B, D, L) * (B, L, C) => (B, D, C)
return attended, attention_weights
class BiLinearTanh(nn.Module):
def __init__(self, left_dim: int, right_dim: int, out_dim: int):
"""
Implementation of equation v_s^T \tanh(W_1 * h_{ij} + W_s * x + b_s)
Parameters
----------
left_dim: `int` dimension of left tensor
        right_dim: `int` dimension of right tensor
        out_dim: `int` size of the hidden projection used for scoring
"""
super().__init__()
self.left_linear = nn.Linear(left_dim, out_dim, bias=True)
self.right_linear = nn.Linear(right_dim, out_dim, bias=False)
self.combine = nn.Linear(out_dim, 1, bias=False)
def forward(self, left_tsr: torch.Tensor, right_tsr: torch.Tensor, mask: torch.Tensor):
"""
compute attention weights on left tensor based on the right tensor.
Parameters
----------
left_tsr: `torch.Tensor` of shape (B, L, H)
right_tsr: `torch.Tensor` of shape (B, D)
mask: `torch.Tensor` of shape (B, L) 1 is for real, 0 is for pad
Returns
-------
"""
assert len(left_tsr.size()) == 3 and len(mask.size()) == 2
left = self.left_linear(left_tsr) # (B, L, O)
right = self.right_linear(right_tsr).unsqueeze(1) # (B, O)
tmp = torch.tanh(left + right) # (B, L, O)
linear_out = self.combine(tmp).squeeze(-1) # (B, L) it is equal to v_s^T \tanh(W_1 * h_{ij} + W_2 * a + b_s)
doc_mask = (mask == 0)
linear_out = linear_out.masked_fill(doc_mask, -np.inf)
# we now can ensure padding tokens will not contribute to softmax
attention_weights = F.softmax(linear_out, dim = -1) # (B, L)
attended = left_tsr * attention_weights.unsqueeze(-1) # (B, L, H)
attended = torch.sum(attended, dim = 1) # (B, H)
return attended, attention_weights
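# --- Illustrative usage sketch (added for exposition; not part of the original module). ---
# BiLinearTanh scores every position of `left_tsr` against the context vector `right_tsr`
# and returns the attention-weighted sum; sizes below are arbitrary assumptions.
def _demo_bilinear_tanh():
    B, L, H, D = 2, 7, 8, 5
    att = BiLinearTanh(left_dim=H, right_dim=D, out_dim=12)
    left_tsr = torch.randn(B, L, H)
    right_tsr = torch.randn(B, D)
    mask = torch.ones(B, L, dtype=torch.long)
    attended, weights = att(left_tsr, right_tsr, mask)
    assert attended.shape == (B, H) and weights.shape == (B, L)
    return attended, weights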
class MultiHeadAttentionSimple(nn.Module):
def __init__(self, num_heads: int, d_model: int, d_key: int, d_value: int,
# attention_type: int = AttentionType.ConcatNotEqual,
init_weights: bool = False,
use_layer_norm: bool = False):
"""
Simple multi-head attention and customizable with layer-norm
Parameters
----------
num_heads: `int` the number of heads. how many aspects of the evidences you want to see
d_model: `int` input embedding size
d_key: `int` dimension of keys. We will set d_key = d_model
d_value: `int` dimensions of key, d_value = d_model
init_weights: `bool` whether we should init linear layers.
use_layer_norm: `bool` whether we should use layer-norm
"""
super().__init__()
self.num_heads = num_heads
self.d_model, self.d_key, self.d_value = d_model, d_key, d_value
assert d_model == d_key == d_value
self.use_layer_norm = use_layer_norm
        self.w_qs = nn.Linear(d_model, num_heads * d_key)  # pack all heads into one matrix so a single matmul covers them
self.w_ks = nn.Linear(d_model, num_heads * d_key)
self.w_vs = nn.Linear(d_model, num_heads * d_value)
if init_weights:
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_key)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_key)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_value)))
# if attention_type == AttentionType.ConcatNotEqual:
self.attention_func = ConcatNotEqualSelfAttTransFormer(inp_dim=(d_key + d_key), out_dim=d_key)
# else:
# self.attention_func = ScaledDotProductAttention(temperature=np.power(d_key, 0.5))
self.fc = nn.Linear(num_heads * d_value, d_model)
if init_weights: nn.init.xavier_normal_(self.fc.weight)
if use_layer_norm: self.layer_norm = nn.LayerNorm(d_model)
def forward(self, left: torch.Tensor, right: torch.Tensor, mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
compute attention weights and apply it to `right` tensor
Parameters
----------
left: `torch.Tensor` of shape (B, X) X is not necessarily equal to D
right: `torch.Tensor` of shape (B, L, D)
mask: `torch.Tensor` of shape (B, L), binary value, 0 is for pad
        Returns
        -------
        output: `torch.Tensor` of shape (B, 1, d_model), the attended summary of `right`
        attention_weights: `torch.Tensor` of shape (num_heads * B, L, 1)
        """
assert left.size(0) == right.size(0), "Must same dimensions"
assert len(left.size()) == 2 and len(right.size()) == 3
B, L, D = right.size()
assert D == self.d_model == self.d_key, "Must have same shape"
len_q = 1
# transform
query = self.w_qs(left).view(B, len_q, self.num_heads, self.d_key) # (B, 1, num_heads, d_key)
key = self.w_ks(right).view(B, L, self.num_heads, self.d_key) # (B, L, num_heads, d_key)
value = self.w_vs(right).view(B, L, self.num_heads, self.d_value) # (B, L, num_heads, d_value)
# reshape
q = query.permute(2, 0, 1, 3).contiguous().view(-1, len_q, self.d_key) # (num_heads * B) x 1 x dk
k = key.permute(2, 0, 1, 3).contiguous().view(-1, L, self.d_key) # (num_heads * B) x L x dk
v = value.permute(2, 0, 1, 3).contiguous().view(-1, L, self.d_value) # (num_heads * B) x L x dv
# compute attention weights
mask = (mask == 0)
mask = mask.unsqueeze(1).repeat(self.num_heads, 1, 1) # (B * num_heads, 1, L)
attended, attention_weights = self.attention_func(query=q, key=k, value=v, mask=mask)
# concat all heads and push to MLP followed by optional layer_norm
output = attended.view(self.num_heads, B, len_q, self.d_value)
output = output.permute(1, 2, 0, 3).contiguous().view(B, len_q, -1) # b x lq x (n*dv)
tmp = self.fc(output)
if self.use_layer_norm: tmp = self.layer_norm(tmp)
return tmp, attention_weights
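# --- Illustrative usage sketch (added for exposition; not part of the original module). ---
# MultiHeadAttentionSimple requires d_model == d_key == d_value, and `left` must have
# d_model features for its query projection to apply. The sizes below are arbitrary
# assumptions; the output keeps a singleton query-length dimension.
def _demo_multi_head_attention_simple():
    B, L, D, H = 2, 5, 16, 4
    mha = MultiHeadAttentionSimple(num_heads=H, d_model=D, d_key=D, d_value=D)
    left = torch.randn(B, D)
    right = torch.randn(B, L, D)
    mask = torch.ones(B, L, dtype=torch.long)
    out, weights = mha(left, right, mask)
    assert out.shape == (B, 1, D)          # one attended summary per batch element
    assert weights.shape == (H * B, L, 1)  # per-head attention over the L positions
    return out, weights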
class MultiHeadAttentionOriginal(nn.Module):
''' Multi-Head Attention module copied from PyTorch Transformer '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
"""
Parameters
----------
n_head: `int` number of attention layers or heads
        d_model: `int` model (input embedding) dimension
        d_k: `int` dimension of queries/keys per head
        d_v: `int` dimension of values per head
        dropout: `float` dropout rate on attention weights (currently unused; the dropout layers are commented out)
"""
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
        self.w_qs = nn.Linear(d_model, n_head * d_k)  # pack all heads into one matrix so a single matmul covers them
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
# nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
# nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
# nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
# self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.attention = ScaledDotProductAttention(temperature=np.power(1, 1))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
# nn.init.xavier_normal_(self.fc.weight)
# self.dropout = nn.Dropout(dropout)
def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: torch.Tensor = None):
"""
Parameters
----------
q: `torch.Tensor` of shape (B, L, D)
k: `torch.Tensor` of shape (B, R, D)
v: `torch.Tensor` of shape (B, R, D)
        mask: `torch.Tensor` of shape (B, L, R); important: nonzero/True entries mark positions to be masked out (e.g. padding)
Returns
-------
"""
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x .. (quite redundant here)
output, _ = self.attention(q, k, v, mask = mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
# output = self.dropout(self.fc(output))
output = self.fc(output)
output = self.layer_norm(output + residual)
return output, None
class ConcatNotEqualSelfAttTransFormer(nn.Module):
def __init__(self, inp_dim: int, out_dim: int):
super().__init__()
self.inp_dim = inp_dim
self.out_dim = out_dim
# self.num_heads = num_heads
self.linear1 = nn.Linear(inp_dim, out_dim, bias=False)
self.linear2 = nn.Linear(out_dim, 1, bias=False)
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""
compute attention weights and apply it to `right` tensor
Parameters
----------
query: `torch.Tensor` of shape (B, 1, X) X is not necessarily equal to D
key: `torch.Tensor` of shape (B, L, D)
value: `torch.Tensor` of shape (B, L, D)
mask: `torch.Tensor` of shape (B, L), binary value, 0 is for pad
Returns
-------
"""
assert query.size(0) == key.size(0), "Must same dimensions"
# assert len(query.size()) == 2 and len(key.size()) == 3
assert self.inp_dim == (query.size(-1) + key.size(-1)) # due to concat
B, L, D = key.size()
        left_tmp = query.expand(B, L, -1)  # (B, L, X)
        tsr = torch.cat([left_tmp, key], dim=-1)  # (B, L, X + D)
# start computing multi-head self-attention
tmp = torch.tanh(self.linear1(tsr)) # (B, L, out_dim)
linear_out = self.linear2(tmp) # (B, L, C)
        doc_mask = mask.squeeze(1).unsqueeze(-1)  # (B, L, 1) boolean mask, True at padded positions (used to mask the softmax)
# doc_mask = doc_mask.unsqueeze(-1).expand(B, L, 1) # (B, L, C)
linear_out = linear_out.masked_fill(doc_mask, -np.inf) # I learned from Attention is all you need
# we now can ensure padding tokens will not contribute to softmax
attention_weights = F.softmax(linear_out, dim=1) # (B, L, C)
attended = torch.bmm(value.permute(0, 2, 1), attention_weights) # (B, D, L) * (B, L, C) => (B, D, C)
return attended, attention_weights
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
# self.dropout = nn.Dropout(attn_dropout)
        # self.softmax = nn.Softmax(dim=-1)  # unused; softmax is applied functionally in forward()
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask=None):
"""
Parameters
----------
query: `torch.Tensor` (n_heads * B, L, d_k)
key: `torch.Tensor` (n_heads * B, L, d_k)
value: `torch.Tensor` (n_heads * B, L, d_k)
        mask: (n_heads * B, L, L) boolean mask; True marks positions (e.g. padding) to be excluded from attention
Returns
-------
"""
attn = torch.bmm(query, key.transpose(1, 2))
# attn = attn / self.temperature
if mask is not None: attn = attn.masked_fill(mask, -np.inf)
attn = F.softmax(attn, dim = -1) # exp of -np.inf would be zero (checked)
attn = attn.masked_fill(mask, 0) # reset nan
        # attn = self.dropout(attn)  # the original Transformer applies dropout to the attention weights; disabled here
output = torch.bmm(attn, value)
return output, attn
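# --- Illustrative usage sketch (added for exposition; not part of the original module). ---
# Note that forward() unconditionally calls `attn.masked_fill(mask, 0)` after the softmax,
# so a boolean mask must be supplied (True marks positions to ignore). Sizes are arbitrary.
def _demo_scaled_dot_product_attention():
    B, Lq, Lk, Dk = 3, 4, 6, 8
    sdpa = ScaledDotProductAttention(temperature=Dk ** 0.5)
    q, k, v = torch.randn(B, Lq, Dk), torch.randn(B, Lk, Dk), torch.randn(B, Lk, Dk)
    mask = torch.zeros(B, Lq, Lk, dtype=torch.bool)  # nothing masked out
    out, attn = sdpa(q, k, v, mask=mask)
    assert out.shape == (B, Lq, Dk) and attn.shape == (B, Lq, Lk)
    return out, attn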
class CoDaAttention(nn.Module):
def __init__(self, dim: int):
super().__init__()
def forward(self, *input):
pass |
py | 7df8e8e3b0a4dbd9c5acfcbc4087b2f97e1f2bb0 | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v6.services OperatingSystemVersionConstantService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.ads.google_ads.v6.services import operating_system_version_constant_service_client_config
from google.ads.google_ads.v6.services.transports import operating_system_version_constant_service_grpc_transport
from google.ads.google_ads.v6.proto.services import operating_system_version_constant_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class OperatingSystemVersionConstantServiceClient(object):
"""Service to fetch Operating System Version constants."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v6.services.OperatingSystemVersionConstantService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
OperatingSystemVersionConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def operating_system_version_constant_path(cls, criterion_id):
"""Return a fully-qualified operating_system_version_constant string."""
return google.api_core.path_template.expand(
'operatingSystemVersionConstants/{criterion_id}',
criterion_id=criterion_id,
)
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None, client_options=None):
"""Constructor.
Args:
transport (Union[~.OperatingSystemVersionConstantServiceGrpcTransport,
Callable[[~.Credentials, type], ~.OperatingSystemVersionConstantServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = operating_system_version_constant_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(client_options)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=operating_system_version_constant_service_grpc_transport.OperatingSystemVersionConstantServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = operating_system_version_constant_service_grpc_transport.OperatingSystemVersionConstantServiceGrpcTransport(
address=api_endpoint,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_operating_system_version_constant(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested OS version constant in full detail.
Example:
>>> from google.ads import googleads_v6
>>>
>>> client = googleads_v6.OperatingSystemVersionConstantServiceClient()
>>>
>>> resource_name = client.operating_system_version_constant_path('[CRITERION_ID]')
>>>
>>> response = client.get_operating_system_version_constant(resource_name)
Args:
resource_name (str): Required. Resource name of the OS version to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v6.types.OperatingSystemVersionConstant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_operating_system_version_constant' not in self._inner_api_calls:
self._inner_api_calls['get_operating_system_version_constant'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_operating_system_version_constant,
default_retry=self._method_configs['GetOperatingSystemVersionConstant'].retry,
default_timeout=self._method_configs['GetOperatingSystemVersionConstant'].timeout,
client_info=self._client_info,
)
request = operating_system_version_constant_service_pb2.GetOperatingSystemVersionConstantRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_operating_system_version_constant'](request, retry=retry, timeout=timeout, metadata=metadata)
|
py | 7df8e8f631ce2fafac3ad6c603cc55f3a73e0ec5 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC misc output."""
import xml.etree.ElementTree as ET
from test_framework.test_framework import RuvchainTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
)
from test_framework.authproxy import JSONRPCException
class RpcMiscTest(RuvchainTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = self.nodes[0]
self.log.info("test CHECK_NONFATAL")
assert_raises_rpc_error(
-1,
'Internal bug detected: \'request.params[9].get_str() != "trigger_internal_bug"\'',
lambda: node.echo(arg9='trigger_internal_bug'),
)
self.log.info("test getmemoryinfo")
memory = node.getmemoryinfo()['locked']
assert_greater_than(memory['used'], 0)
assert_greater_than(memory['free'], 0)
assert_greater_than(memory['total'], 0)
# assert_greater_than_or_equal() for locked in case locking pages failed at some point
assert_greater_than_or_equal(memory['locked'], 0)
assert_greater_than(memory['chunks_used'], 0)
assert_greater_than(memory['chunks_free'], 0)
assert_equal(memory['used'] + memory['free'], memory['total'])
self.log.info("test mallocinfo")
try:
mallocinfo = node.getmemoryinfo(mode="mallocinfo")
self.log.info('getmemoryinfo(mode="mallocinfo") call succeeded')
tree = ET.fromstring(mallocinfo)
assert_equal(tree.tag, 'malloc')
except JSONRPCException:
self.log.info('getmemoryinfo(mode="mallocinfo") not available')
assert_raises_rpc_error(-8, 'mallocinfo is only available when compiled with glibc 2.10+', node.getmemoryinfo, mode="mallocinfo")
assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar")
self.log.info("test logging")
assert_equal(node.logging()['qt'], True)
node.logging(exclude=['qt'])
assert_equal(node.logging()['qt'], False)
node.logging(include=['qt'])
assert_equal(node.logging()['qt'], True)
self.log.info("test getindexinfo")
# Without any indices running the RPC returns an empty object
assert_equal(node.getindexinfo(), {})
# Restart the node with indices and wait for them to sync
self.restart_node(0, ["-txindex", "-blockfilterindex"])
self.wait_until(lambda: all(i["synced"] for i in node.getindexinfo().values()))
# Returns a list of all running indices by default
assert_equal(
node.getindexinfo(),
{
"txindex": {"synced": True, "best_block_height": 200},
"basic block filter index": {"synced": True, "best_block_height": 200}
}
)
# Specifying an index by name returns only the status of that index
assert_equal(
node.getindexinfo("txindex"),
{
"txindex": {"synced": True, "best_block_height": 200},
}
)
# Specifying an unknown index name returns an empty result
assert_equal(node.getindexinfo("foo"), {})
if __name__ == '__main__':
RpcMiscTest().main()
|
py | 7df8e92698322c7b81fef20d7c5ba4ea52aeea8d | import datetime
import math
import re
from django.utils.html import strip_tags
def count_words(html_string):
# html_string = """
# <h1>This is a title</h1>
# """
word_string = strip_tags(html_string)
matching_words = re.findall(r'\w+', word_string)
count = len(matching_words) #joincfe.com/projects/
return count
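# --- Illustrative example (added for exposition; not part of the original module). ---
# A quick sanity check of count_words(); the sample HTML string is an arbitrary assumption.
def _demo_count_words():
    sample = "<h1>This is a title</h1>"
    return count_words(sample)  # -> 4 ("This", "is", "a", "title")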
def get_read_time(html_string):
count = count_words(html_string)
read_time_min = math.ceil(count/200.0) #assuming 200wpm reading
# read_time_sec = read_time_min * 60
# read_time = str(datetime.timedelta(seconds=read_time_sec))
# read_time = str(datetime.timedelta(minutes=read_time_min))
return int(read_time_min) |
py | 7df8ebfd47999defa755a04cfd67b2e3863a47a8 | """
Django settings for reactify project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ksy0*&5)&=lxwj-7g+$zepz3#2*vt#os^nbi5o&2#k_s5s6ee@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'reactify.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'reactify.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'staticfiles'),
]
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static-cdn-local')
CORS_URLS_REGEX = r'^/api.*'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = (
'*',
'your-domain.com',
'your-bucket-here.s3-us-west-2.amazonaws.com',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
)
}
|
py | 7df8ec0eca393eba7b3291ef57db4fb9d8c2f3d9 | '''
Test SERVER_VERIFY_HOOK
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server", ssl=True)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response form the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl_verify_test',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.verify.server.policy': 'ENFORCED',
'proxy.config.ssl.client.verify.server.properties': 'NONE',
'proxy.config.url_remap.pristine_host_hdr': 1
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map https://foo.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)
)
ts.Disk.remap_config.AddLine(
'map https://bar.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)
)
ts.Disk.remap_config.AddLine(
'map https://random.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)
)
ts.Disk.sni_yaml.AddLine(
'sni:')
ts.Disk.sni_yaml.AddLine(
'- fqdn: bar.com')
ts.Disk.sni_yaml.AddLine(
' verify_server_policy: PERMISSIVE')
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_verify_test.so'),
ts, '-count=2 -bad=random.com -bad=bar.com')
tr = Test.AddTestRun("request good name")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --resolve \"foo.com:{0}:127.0.0.1\" -k https://foo.com:{0}".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have failed")
tr2 = Test.AddTestRun("request bad name")
tr2.StillRunningAfter = ts
tr2.StillRunningAfter = server
tr2.Processes.Default.Command = "curl --resolve \"random.com:{0}:127.0.0.1\" -k https://random.com:{0}".format(
ts.Variables.ssl_port)
tr2.Processes.Default.ReturnCode = 0
tr2.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Curl attempt should have failed")
tr3 = Test.AddTestRun("request bad name permissive")
tr3.StillRunningAfter = ts
tr3.StillRunningAfter = server
tr3.Processes.Default.Command = "curl --resolve \"bar.com:{0}:127.0.0.1\" -k https://bar.com:{0}".format(ts.Variables.ssl_port)
tr3.Processes.Default.ReturnCode = 0
tr3.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have failed")
# Overriding the built in ERROR check since we expect tr2 to fail
ts.Disk.diags_log.Content = Testers.ContainsExpression(
"WARNING: TS_EVENT_SSL_VERIFY_SERVER plugin failed the origin certificate check for 127.0.0.1. Action=Terminate SNI=random.com",
"random.com should fail")
ts.Disk.diags_log.Content += Testers.ContainsExpression(
"WARNING: TS_EVENT_SSL_VERIFY_SERVER plugin failed the origin certificate check for 127.0.0.1. Action=Continue SNI=bar.com",
"bar.com should fail but continue")
ts.Disk.diags_log.Content += Testers.ExcludesExpression("SNI=foo.com", "foo.com should not fail in any way")
ts.Streams.All += Testers.ContainsExpression(
"Server verify callback 0 [\da-fx]+? - event is good SNI=foo.com good HS", "verify callback happens 2 times")
ts.Streams.All += Testers.ContainsExpression(
"Server verify callback 1 [\da-fx]+? - event is good SNI=foo.com good HS", "verify callback happens 2 times")
ts.Streams.All += Testers.ContainsExpression(
"Server verify callback 0 [\da-fx]+? - event is good SNI=random.com error HS", "verify callback happens 2 times")
ts.Streams.All += Testers.ContainsExpression(
"Server verify callback 1 [\da-fx]+? - event is good SNI=random.com error HS", "verify callback happens 2 times")
ts.Streams.All += Testers.ContainsExpression(
"Server verify callback 0 [\da-fx]+? - event is good SNI=bar.com error HS", "verify callback happens 2 times")
ts.Streams.All += Testers.ContainsExpression(
"Server verify callback 1 [\da-fx]+? - event is good SNI=bar.com error HS", "verify callback happens 2 times")
ts.Streams.All += Testers.ContainsExpression("Server verify callback SNI APIs match=true", "verify SNI names match")
|
py | 7df8edf539aa1c0395a53e225a3f756d3c214486 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for the "ml-pipelines-sdk" package.
Core TFX pipeline authoring SDK, with a minimal set of dependencies.
"""
PACKAGE_NAME = 'ml-pipelines-sdk'
|
py | 7df8ef5ce013238b3e16110887a95f7a16f9c422 | import pytest
from pymultisig.btc_utils import (estimate_transaction_fees,
parse_redeem_script)
class TestEstimateTransactionFees(object):
def test_returns_fees(self):
assert(estimate_transaction_fees(1, 1, 15) == 5115)
assert(estimate_transaction_fees(1, 2, 15) == 5625)
assert(estimate_transaction_fees(1, 3, 15) == 6135)
assert(estimate_transaction_fees(2, 1, 15) == 9570)
assert(estimate_transaction_fees(2, 2, 15) == 10080)
assert(estimate_transaction_fees(2, 3, 15) == 10590)
assert(estimate_transaction_fees(3, 1, 15) == 14025)
assert(estimate_transaction_fees(3, 2, 15) == 14535)
assert(estimate_transaction_fees(3, 3, 15) == 15045)
assert(estimate_transaction_fees(1, 3, 30) == 12270)
assert(estimate_transaction_fees(3, 1, 30) == 28050)
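        # Note (added for exposition): the expected values above are consistent with a
        # size model of roughly 297 (v)bytes per input + 34 per output + 10 of overhead,
        # multiplied by the fee rate -- an observation about these fixtures, not a claim
        # about how estimate_transaction_fees() is implemented.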
class TestParseRedeemScript(object):
def test_invalid_hex_raises_error(self):
with pytest.raises(ValueError):
parse_redeem_script("zzzz")
def test_invalid_script_raises_error(self):
with pytest.raises(ValueError):
parse_redeem_script("deadbeef")
def test_non_2of3_multisig_raises_error(self):
# 1-of-1 multisig from tx:
# 7edb32d4ffd7a385b763c7a8e56b6358bcd729e747290624e18acdbe6209fc45
rs = "5141042f90074d7a5bf30c72cf3a8dfd1381bdbd30407010e878f3"\
"a11269d5f74a58788505cdca22ea6eab7cfb40dc0e07aba200424a"\
"b0d79122a653ad0c7ec9896bdf51ae"
with pytest.raises(ValueError):
parse_redeem_script(rs)
def test_vectors(self, valid_vectors):
for vector in valid_vectors:
result = parse_redeem_script(vector['redeem_script'])
expected = vector['parsed_redeem_script']
assert(result == expected)
|
py | 7df8ef8b98c791a57ccdcff4d95044e2b0019a96 | from flask import Flask, render_template, request, flash ,session,send_file
from database import users, db_init, db
import re
import csv
from werkzeug.utils import secure_filename
from csrlc import replace_csv
import os
import pytesseract
from io import TextIOWrapper
import xlrd
import openpyxl
from openpyxl.utils.cell import get_column_letter
#import urllib.urlopen
from urllib.request import urlopen
from json import load
import json
import requests
#from xlrd import open_workbook
import base64
app = Flask(__name__)
app.secret_key="1258suy"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.sqlite"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db_init(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/typein.html',methods=['GET','POST'])
def typein():
    # if request.method == 'POST':
    #     txt = request.form['text']
    return render_template('typein.html')
@app.route('/xlsx.html')
def xlsx():
return render_template('xlsx.html')
@app.route('/csv.html')
def csv():
return render_template('csv.html')
@app.route('/text.html')
def text():
return render_template('text.html')
@app.route('/typein-find.html',methods=['GET','POST'])
def find():
if request.method=='POST':
check=request.form['find']
txt=request.form['text']
#a = users( txt, check,"")
x=re.search(check, txt)
#print(x.span())
#db.session.add(a)
#db.session.commit()
if x:
flash(f"Word is found at the position {x.span()}")
#print('Y')
#return render_template('typein-find.html')
else:
flash("Word is not found")
#return render_template('typein-find.html')
return render_template("typein-find.html")
@app.route('/typein-replace.html',methods=['GET','POST'])
def replace():
if request.method=='POST':
check1=request.form['find']
txt1=request.form['text']
rlc=request.form['replace']
new_string = re.sub(check1,rlc,txt1)
flash(new_string)
return render_template("typein-replace.html")
@app.route('/typein-number.html',methods=['GET','POST'])
def number():
if request.method=='POST':
txt1=request.form['text']
pattern = '\d+'
result = re.findall(pattern, txt1)
flash(result)
return render_template("typein-number.html")
@app.route('/typein-https.html',methods=['GET','POST'])
def https():
if request.method=='POST':
txt1=request.form['text']
obj1 = re.findall('(\w+)://',txt1)
flash(f'Protocol:{obj1}')
obj2 = re.findall('://([\w\-\.]+)',txt1)
flash(f'Host:{obj2}')
return render_template("typein-https.html")
@app.route('/typein-ip.html',methods=['GET','POST'])
def ip():
if request.method=='POST':
txt1=request.form['text']
        regex = r'^(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)$'
if(re.search(regex, txt1)):
flash("Valid Ip address")
else:
flash("Invalid Ip address")
return render_template("typein-ip.html")
@app.route('/typein-mail.html',methods=['GET','POST'])
def mail():
if request.method=='POST':
txt1=request.form['text']
regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if(re.search(regex,txt1)):
flash("Valid Email")
else:
flash("Invalid Email")
return render_template("typein-mail.html")
@app.route('/typein-quotedword.html',methods=['GET','POST'])
def quotedword():
if request.method=='POST':
txt1=request.form['text']
flash(re.findall(r'"(.*?)"', txt1))
return render_template("typein-quotedword.html")
@app.route('/IP_location.html',methods=['GET','POST'])
def ip_location():
if request.method=='POST':
while True:
txt1=request.form['text']
url="http://ip-api.com/json/"
response=urlopen(url+txt1)
data=response.read()
values=json.loads(data)
flash(" IP: " + values['query'])
flash(" City: " + values['city'])
flash(" ISP: " + values['isp'])
flash(" Country: " + values['country'])
flash(" Region: " + values['region'])
flash(" Time zone: " + values['timezone'])
break
return render_template("IP_location.html")
@app.route('/mobile_location.html',methods=['GET','POST'])
def mobile_location():
import phonenumbers
from phonenumbers import timezone
from phonenumbers import geocoder
from phonenumbers import carrier
if request.method=='POST':
countrycode = request.form['text2']
phonenumber = request.form['text']
num = countrycode+phonenumber
number = phonenumbers.parse(num)
flash(geocoder.description_for_number(number,'en',region="GB"))
flash(carrier.name_for_number(number, 'en'))
timeZone = timezone.time_zones_for_number(number)
flash(timeZone)
return render_template("mobile_location.html")
#for text
@app.route('/text-find.html',methods=['GET','POST'])
def text_find():
return render_template("text-find.html")
@app.route('/text-replace.html',methods=['GET','POST'])
def text_replace():
return render_template("text-replace.html")
#for csv
@app.route('/csv-find.html',methods=['GET','POST'])
def cv_find():
import csv
if request.method=='POST':
ls=[]
find=request.form['find']
text=request.files['actual-btn']
#filename=text.split(".")
text = TextIOWrapper(text, encoding='utf-8')
cv_reader1= csv.reader(text)
for line in cv_reader1:
for word in line:
a=word.split(';')
ls=ls+a
for item in ls:
if item in find:
flash('Found')
break
else:
flash('Not Found')
return render_template("csv-find.html")
@app.route('/csv-replace.html',methods=['GET','POST'])
def csv_replace():
import csv
text = b""
if request.method=='POST':
org_wrd=request.form['Replace']
rlc_wrd=request.form['nwwrd']
txt= request.files['actual-btn']
txt = TextIOWrapper(txt, encoding='utf-8')
csv_reader=csv.reader(txt)
text = ''.join([str(i) for i in csv_reader])
text = text.replace('{}'.format(org_wrd),'{}'.format(rlc_wrd))
x = open("output.csv","w")
x.writelines(text)
x.close()
'''text = bytes(text, 'utf-8')'''
text = text.encode('utf-8')
return render_template("csv-replace.html",op=text.decode('utf-8'))
#return render_template("csv-replace.html",op=base64.b64encode(text).decode('utf-8'))
@app.route('/xlsx-find.html',methods=['GET','POST'])
def xlx_find():
import xlrd
#from xlrd import open_workbook
if request.method=='POST':
find=request.form['find']
txt=request.files['actual-btn']
#filename=text.split(".")
#txt = TextIOWrapper(txt, encoding='utf-8')
#txt.save(os.path.join( secure_filename(txt.filename))
filename = secure_filename(txt.filename)
new_path = os.path.abspath(filename)
w = xlrd.open_workbook(new_path)
sheet = w.sheet_by_index(0)
count=0
for i in range(0,sheet.nrows):
for j in range(0,sheet.ncols):
if sheet.cell_value(i,j) == find:
flash(f"Word Found at the position ,{i}*{j}")
count=count+1
if count==0:
flash("Word is not Found")
return render_template("xlsx-find.html")
@app.route('/download/<filename>',methods=['GET'])
def download(filename):
return send_file(filename, as_attachment=True)
@app.route('/xlsx-replace.html',methods=['GET','POST'])
def xlx_replace():
filename = ""
if request.method=='POST':
org_wrd=request.form['Replace']
rlc_wrd=request.form['nwwrd']
txt= request.files['actual-btn']
#txt = TextIOWrapper(txt, encoding='utf-8')
filename = secure_filename(txt.filename)
new_path = os.path.abspath(filename)
workbook = openpyxl.load_workbook(new_path)
worksheet = workbook["Sheet1" ]
number_of_rows = worksheet.max_row
number_of_columns = worksheet.max_column
replacementTextKeyPairs ={'{}'.format(org_wrd):'{}'.format(rlc_wrd)}
for i in range(number_of_columns):
for k in range(number_of_rows):
cellValue = str(worksheet[get_column_letter(i+1)+str(k+1)].value)
for key in replacementTextKeyPairs.keys():
if str(cellValue) == key:
newCellValue = replacementTextKeyPairs.get(key)
worksheet[get_column_letter(i+1)+str(k+1)] = str(newCellValue)
workbook.save('o{}'.format(filename))
new_path1 = os.path.abspath("o" + filename)
data = open(new_path1, 'rb').read()
#base64_encoded = base64.b64encode(data).decode('UTF-8')
#output = base64_encoded.encode('utf-8')
#worksheet = bytes(worksheet, 'utf-8')
return render_template("xlsx-replace.html",op=f"o{filename}")
#return render_template("xlsx-replace.html",op='output.xlsx')
if __name__ == '__main__':
app.run(debug=True,port=80) |
py | 7df8efd61ae412980b0dcf474fc40f16665f2a62 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_common
from neutron.objects import base as objects_base
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import callbacks
from neutron.services.trunk import constants
from neutron.services.trunk import drivers
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import rules
from neutron.services.trunk.seg_types import validators
LOG = logging.getLogger(__name__)
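# Rough shape of the trunk dict handled by validate()/create_trunk() below (an
# illustrative sketch inferred from this module, not the formal API definition):
#   {'port_id': <parent port UUID>, 'tenant_id': <project id>, 'admin_state_up': True,
#    'sub_ports': [{'port_id': <UUID>, 'segmentation_type': 'vlan',
#                   'segmentation_id': 100}]}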
@resource_extend.has_resource_extenders
@registry.has_registry_receivers
class TrunkPlugin(service_base.ServicePluginBase,
common_db_mixin.CommonDbMixin):
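    """Service plugin implementing the 'trunk' and 'trunk-details' extensions.

    Keeps trunk/subport state in the database and delegates the actual
    provisioning to whichever backend drivers registered themselves via
    register_driver().
    """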
supported_extension_aliases = ["trunk", "trunk-details"]
__native_pagination_support = True
__native_sorting_support = True
def __init__(self):
self._rpc_backend = None
self._drivers = []
self._segmentation_types = {}
self._interfaces = set()
self._agent_types = set()
drivers.register()
registry.subscribe(rules.enforce_port_deletion_rules,
resources.PORT, events.BEFORE_DELETE)
registry.publish(constants.TRUNK_PLUGIN, events.AFTER_INIT, self)
for driver in self._drivers:
LOG.debug('Trunk plugin loaded with driver %s', driver.name)
self.check_compatibility()
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME])
def _extend_port_trunk_details(port_res, port_db):
"""Add trunk details to a port."""
if port_db.trunk_port:
subports = {
x.port_id: {'segmentation_id': x.segmentation_id,
'segmentation_type': x.segmentation_type,
'port_id': x.port_id}
for x in port_db.trunk_port.sub_ports
}
core_plugin = directory.get_plugin()
ports = core_plugin.get_ports(
context.get_admin_context(), filters={'id': subports})
for port in ports:
subports[port['id']]['mac_address'] = port['mac_address']
trunk_details = {'trunk_id': port_db.trunk_port.id,
'sub_ports': [x for x in subports.values()]}
port_res['trunk_details'] = trunk_details
return port_res
def check_compatibility(self):
"""Verify the plugin can load correctly and fail otherwise."""
self.check_driver_compatibility()
self.check_segmentation_compatibility()
def check_driver_compatibility(self):
"""Fail to load if no compatible driver is found."""
if not any([driver.is_loaded for driver in self._drivers]):
raise trunk_exc.IncompatibleTrunkPluginConfiguration()
def check_segmentation_compatibility(self):
"""Fail to load if segmentation type conflicts are found.
In multi-driver deployments each loaded driver must support the same
set of segmentation types consistently.
"""
# Get list of segmentation types for the loaded drivers.
list_of_driver_seg_types = [
set(driver.segmentation_types) for driver in self._drivers
if driver.is_loaded
]
# If not empty, check that there is at least one we can use.
compat_segmentation_types = set()
if list_of_driver_seg_types:
compat_segmentation_types = (
set.intersection(*list_of_driver_seg_types))
if not compat_segmentation_types:
raise trunk_exc.IncompatibleDriverSegmentationTypes()
# If there is at least one, make sure the validator is defined.
try:
for seg_type in compat_segmentation_types:
self.add_segmentation_type(
seg_type, validators.get_validator(seg_type))
except KeyError:
raise trunk_exc.SegmentationTypeValidatorNotFound(
seg_type=seg_type)
def set_rpc_backend(self, backend):
self._rpc_backend = backend
def is_rpc_enabled(self):
return self._rpc_backend is not None
def register_driver(self, driver):
"""Register driver with trunk plugin."""
if driver.agent_type:
self._agent_types.add(driver.agent_type)
self._interfaces = self._interfaces | set(driver.interfaces)
self._drivers.append(driver)
@property
def registered_drivers(self):
"""The registered drivers."""
return self._drivers
@property
def supported_interfaces(self):
"""A set of supported interfaces."""
return self._interfaces
@property
def supported_agent_types(self):
"""A set of supported agent types."""
return self._agent_types
def add_segmentation_type(self, segmentation_type, id_validator):
self._segmentation_types[segmentation_type] = id_validator
LOG.debug('Added support for segmentation type %s', segmentation_type)
def validate(self, context, trunk):
"""Return a valid trunk or raises an error if unable to do so."""
trunk_details = trunk
trunk_validator = rules.TrunkPortValidator(trunk['port_id'])
trunk_details['port_id'] = trunk_validator.validate(context)
subports_validator = rules.SubPortsValidator(
self._segmentation_types, trunk['sub_ports'], trunk['port_id'])
trunk_details['sub_ports'] = subports_validator.validate(context)
return trunk_details
def get_plugin_description(self):
return "Trunk port service plugin"
@classmethod
def get_plugin_type(cls):
return "trunk"
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_trunk(self, context, trunk_id, fields=None):
"""Return information for the specified trunk."""
return self._get_trunk(context, trunk_id)
@db_base_plugin_common.filter_fields
@db_base_plugin_common.convert_result_to_dict
def get_trunks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
"""Return information for available trunks."""
filters = filters or {}
pager = objects_base.Pager(sorts=sorts, limit=limit,
page_reverse=page_reverse, marker=marker)
return trunk_objects.Trunk.get_objects(context, _pager=pager,
**filters)
@db_base_plugin_common.convert_result_to_dict
def create_trunk(self, context, trunk):
"""Create a trunk."""
trunk = self.validate(context, trunk['trunk'])
sub_ports = [trunk_objects.SubPort(
context=context,
port_id=p['port_id'],
segmentation_id=p['segmentation_id'],
segmentation_type=p['segmentation_type'])
for p in trunk['sub_ports']]
admin_state_up = trunk.get('admin_state_up', True)
# NOTE(status_police): a trunk is created in DOWN status. Depending
# on the nature of the create request, a driver may set the status
# immediately to ACTIVE if no physical provisioning is required.
# Otherwise a transition to BUILD (or ERROR) should be expected
# depending on how the driver reacts. PRECOMMIT failures prevent the
# trunk from being created altogether.
trunk_description = trunk.get('description', "")
trunk_obj = trunk_objects.Trunk(context=context,
admin_state_up=admin_state_up,
id=uuidutils.generate_uuid(),
name=trunk.get('name', ""),
description=trunk_description,
project_id=trunk['tenant_id'],
port_id=trunk['port_id'],
status=constants.DOWN_STATUS,
sub_ports=sub_ports)
with db_api.autonested_transaction(context.session):
trunk_obj.create()
payload = callbacks.TrunkPayload(context, trunk_obj.id,
current_trunk=trunk_obj)
registry.notify(
constants.TRUNK, events.PRECOMMIT_CREATE, self,
payload=payload)
registry.notify(
constants.TRUNK, events.AFTER_CREATE, self, payload=payload)
return trunk_obj
@db_base_plugin_common.convert_result_to_dict
def update_trunk(self, context, trunk_id, trunk):
"""Update information for the specified trunk."""
trunk_data = trunk['trunk']
with db_api.autonested_transaction(context.session):
trunk_obj = self._get_trunk(context, trunk_id)
original_trunk = copy.deepcopy(trunk_obj)
# NOTE(status_police): a trunk status should not change during an
# update_trunk(), even in face of PRECOMMIT failures. This is
# because only name and admin_state_up are being affected, and
# these are DB properties only.
trunk_obj.update_fields(trunk_data, reset_changes=True)
trunk_obj.update()
payload = events.DBEventPayload(
context, resource_id=trunk_id, states=(original_trunk,),
desired_state=trunk_obj, request_body=trunk_data)
registry.publish(constants.TRUNK, events.PRECOMMIT_UPDATE, self,
payload=payload)
registry.notify(constants.TRUNK, events.AFTER_UPDATE, self,
payload=callbacks.TrunkPayload(
context, trunk_id,
original_trunk=original_trunk,
current_trunk=trunk_obj))
return trunk_obj
def delete_trunk(self, context, trunk_id):
"""Delete the specified trunk."""
with db_api.autonested_transaction(context.session):
trunk = self._get_trunk(context, trunk_id)
rules.trunk_can_be_managed(context, trunk)
trunk_port_validator = rules.TrunkPortValidator(trunk.port_id)
if not trunk_port_validator.is_bound(context):
# NOTE(status_police): when a trunk is deleted, the logical
# object disappears from the datastore, therefore there is no
# status transition involved. If PRECOMMIT failures occur,
# the trunk remains in the status where it was.
trunk.delete()
payload = callbacks.TrunkPayload(context, trunk_id,
original_trunk=trunk)
registry.notify(constants.TRUNK, events.PRECOMMIT_DELETE, self,
payload=payload)
else:
raise trunk_exc.TrunkInUse(trunk_id=trunk_id)
registry.notify(constants.TRUNK, events.AFTER_DELETE, self,
payload=payload)
@db_base_plugin_common.convert_result_to_dict
def add_subports(self, context, trunk_id, subports):
"""Add one or more subports to trunk."""
with db_api.autonested_transaction(context.session):
trunk = self._get_trunk(context, trunk_id)
# Check for basic validation since the request body here is not
# automatically validated by the API layer.
subports = subports['sub_ports']
subports_validator = rules.SubPortsValidator(
self._segmentation_types, subports, trunk['port_id'])
subports = subports_validator.validate(
context, basic_validation=True)
added_subports = []
rules.trunk_can_be_managed(context, trunk)
original_trunk = copy.deepcopy(trunk)
# NOTE(status_police): the trunk status should transition to
# DOWN (and finally in ACTIVE or ERROR), only if it is not in
# ERROR status already. A user should attempt to resolve the ERROR
# condition before adding more subports to the trunk. Should a
# trunk be in DOWN or BUILD state (e.g. when dealing with
# multiple concurrent requests), the status is still forced to
# DOWN and thus can potentially overwrite an interleaving state
# change to ACTIVE. Eventually the driver should bring the status
# back to ACTIVE or ERROR.
if trunk.status == constants.ERROR_STATUS:
raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
else:
trunk.update(status=constants.DOWN_STATUS)
for subport in subports:
obj = trunk_objects.SubPort(
context=context,
trunk_id=trunk_id,
port_id=subport['port_id'],
segmentation_type=subport['segmentation_type'],
segmentation_id=subport['segmentation_id'])
obj.create()
trunk['sub_ports'].append(obj)
added_subports.append(obj)
payload = callbacks.TrunkPayload(context, trunk_id,
current_trunk=trunk,
original_trunk=original_trunk,
subports=added_subports)
if added_subports:
registry.notify(constants.SUBPORTS, events.PRECOMMIT_CREATE,
self, payload=payload)
if added_subports:
registry.notify(
constants.SUBPORTS, events.AFTER_CREATE, self, payload=payload)
return trunk
@db_base_plugin_common.convert_result_to_dict
def remove_subports(self, context, trunk_id, subports):
"""Remove one or more subports from trunk."""
subports = subports['sub_ports']
with db_api.autonested_transaction(context.session):
trunk = self._get_trunk(context, trunk_id)
original_trunk = copy.deepcopy(trunk)
rules.trunk_can_be_managed(context, trunk)
subports_validator = rules.SubPortsValidator(
self._segmentation_types, subports)
# the subports are being removed, therefore we do not need to
# enforce any specific trunk rules, other than basic validation
# of the request body.
subports = subports_validator.validate(
context, basic_validation=True,
trunk_validation=False)
current_subports = {p.port_id: p for p in trunk.sub_ports}
removed_subports = []
for subport in subports:
subport_obj = current_subports.pop(subport['port_id'], None)
if not subport_obj:
raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
port_id=subport['port_id'])
subport_obj.delete()
removed_subports.append(subport_obj)
del trunk.sub_ports[:]
trunk.sub_ports.extend(current_subports.values())
# NOTE(status_police): the trunk status should transition to
# DOWN irrespective of the status in which it is in to allow
# the user to resolve potential conflicts due to prior add_subports
# operations.
# Should a trunk be in DOWN or BUILD state (e.g. when dealing
# with multiple concurrent requests), the status is still forced
# to DOWN. See add_subports() for more details.
trunk.update(status=constants.DOWN_STATUS)
payload = callbacks.TrunkPayload(context, trunk_id,
current_trunk=trunk,
original_trunk=original_trunk,
subports=removed_subports)
if removed_subports:
registry.notify(constants.SUBPORTS, events.PRECOMMIT_DELETE,
self, payload=payload)
if removed_subports:
registry.notify(
constants.SUBPORTS, events.AFTER_DELETE, self, payload=payload)
return trunk
@db_base_plugin_common.filter_fields
def get_subports(self, context, trunk_id, fields=None):
"""Return subports for the specified trunk."""
trunk = self.get_trunk(context, trunk_id)
return {'sub_ports': trunk['sub_ports']}
def _get_trunk(self, context, trunk_id):
"""Return the trunk object or raise if not found."""
obj = trunk_objects.Trunk.get_object(context, id=trunk_id)
if obj is None:
raise trunk_exc.TrunkNotFound(trunk_id=trunk_id)
return obj
# NOTE(tidwellr) Consider keying off of PRECOMMIT_UPDATE if we find
# AFTER_UPDATE to be problematic for setting trunk status when a
# a parent port becomes unbound.
@registry.receives(resources.PORT, [events.AFTER_UPDATE])
def _trigger_trunk_status_change(self, resource, event, trigger, **kwargs):
updated_port = kwargs['port']
trunk_details = updated_port.get('trunk_details')
# If no trunk_details, the port is not the parent of a trunk.
if not trunk_details:
return
context = kwargs['context']
original_port = kwargs['original_port']
orig_vif_type = original_port.get(portbindings.VIF_TYPE)
new_vif_type = updated_port.get(portbindings.VIF_TYPE)
vif_type_changed = orig_vif_type != new_vif_type
if vif_type_changed and new_vif_type == portbindings.VIF_TYPE_UNBOUND:
trunk_id = trunk_details['trunk_id']
# NOTE(status_police) Trunk status goes to DOWN when the parent
# port is unbound. This means there are no more physical resources
# associated with the logical resource.
self.update_trunk(context, trunk_id,
{'trunk': {'status': constants.DOWN_STATUS}})
|
py | 7df8efdfbd6caa67c99c10f104a80072c82adfda | # Copyright 2009-2012 Yelp
# Copyright 2013 David Marin
# Copyright 2014 Shusen Liu
# Copyright 2015-2016 Yelp
# Copyright 2017 Yelp and Contributors
# Copyright 2018 Yelp
# Copyright 2019 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the hadoop job runner."""
import getpass
import os
import os.path
from io import BytesIO
from subprocess import check_call
from subprocess import PIPE
from unittest import skipIf
import mrjob.step
from mrjob.conf import combine_dicts
from mrjob.fs.hadoop import HadoopFilesystem
from mrjob.hadoop import HadoopJobRunner
from mrjob.hadoop import fully_qualify_hdfs_path
from mrjob.hadoop import pty
from mrjob.step import StepFailedException
from tests.mockhadoop import add_mock_hadoop_counters
from tests.mockhadoop import add_mock_hadoop_output
from tests.mockhadoop import get_mock_hadoop_cmd_args
from tests.mockhadoop import get_mock_hdfs_root
from tests.mockhadoop import MockHadoopTestCase
from tests.mr_jar_and_streaming import MRJarAndStreaming
from tests.mr_jar_with_generic_args import MRJarWithGenericArgs
from tests.mr_just_a_jar import MRJustAJar
from tests.mr_null_spark import MRNullSpark
from tests.mr_spark_jar import MRSparkJar
from tests.mr_spark_script import MRSparkScript
from tests.mr_streaming_and_spark import MRStreamingAndSpark
from tests.mr_two_step_hadoop_format_job import MRTwoStepJob
from tests.mr_word_count import MRWordCount
from tests.py2 import Mock
from tests.py2 import call
from tests.py2 import patch
from tests.sandbox import BasicTestCase
from tests.sandbox import EmptyMrjobConfTestCase
from tests.sandbox import SandboxedTestCase
from tests.test_bin import PYTHON_BIN
if pty is None:
# some tests should run even if pty is missing
pty = Mock()
class TestFullyQualifyHDFSPath(BasicTestCase):
def test_empty(self):
with patch('getpass.getuser') as getuser:
getuser.return_value = 'dave'
self.assertEqual(fully_qualify_hdfs_path(''), 'hdfs:///user/dave/')
def test_relative_path(self):
with patch('getpass.getuser') as getuser:
getuser.return_value = 'dave'
self.assertEqual(fully_qualify_hdfs_path('path/to/chocolate'),
'hdfs:///user/dave/path/to/chocolate')
def test_absolute_path(self):
self.assertEqual(fully_qualify_hdfs_path('/path/to/cheese'),
'hdfs:///path/to/cheese')
def test_hdfs_uri(self):
self.assertEqual(fully_qualify_hdfs_path('hdfs://host/path/'),
'hdfs://host/path/')
def test_s3n_uri(self):
self.assertEqual(fully_qualify_hdfs_path('s3n://bucket/oh/noes'),
's3n://bucket/oh/noes')
def test_s3a_uri(self):
self.assertEqual(fully_qualify_hdfs_path('s3a://bucket/oh/noes'),
's3a://bucket/oh/noes')
def test_other_uri(self):
self.assertEqual(fully_qualify_hdfs_path('foo://bar/baz'),
'foo://bar/baz')
class RunnerFullyQualifiesOutputPathsTestCase(MockHadoopTestCase):
def test_output_dir(self):
runner = HadoopJobRunner(output_dir='/path/to/output')
self.assertEqual(runner._output_dir, 'hdfs:///path/to/output')
    def test_step_output_dir(self):
        runner = HadoopJobRunner(step_output_dir='/path/to/step-output')
        self.assertEqual(runner._step_output_dir,
                         'hdfs:///path/to/step-output')
class HadoopStreamingJarTestCase(SandboxedTestCase):
def setUp(self):
super(HadoopStreamingJarTestCase, self).setUp()
self.mock_paths = []
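        # tests append fake jar locations here; mock_ls() below serves them in
        # place of a real filesystem listing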
def mock_ls(path): # don't bother to support globs
return (p for p in sorted(self.mock_paths) if p.startswith(path))
self.start(patch('mrjob.fs.local.LocalFilesystem.ls',
side_effect=mock_ls))
os.environ.clear()
self.runner = HadoopJobRunner()
def test_empty_fs(self):
self.assertEqual(self.runner._find_hadoop_streaming_jar(), None)
# tests of well-known environment variables
def test_hadoop_prefix(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op/prefix'
self.mock_paths.append('/ha/do/op/prefix/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/prefix/hadoop-streaming.jar')
def test_hadoop_prefix_beats_hadoop_home(self):
os.environ['HADOOP_HOME'] = '/ha/do/op/home'
self.mock_paths.append('/ha/do/op/home/hadoop-streaming.jar')
self.test_hadoop_prefix()
def test_hadoop_home(self):
os.environ['HADOOP_HOME'] = '/ha/do/op/home'
self.mock_paths.append('/ha/do/op/home/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/home/hadoop-streaming.jar')
def test_hadoop_home_beats_hadoop_install(self):
os.environ['HADOOP_INSTALL'] = '/ha/do/op/install'
self.mock_paths.append('/ha/do/op/install/hadoop-streaming.jar')
self.test_hadoop_home()
def test_hadoop_install(self):
os.environ['HADOOP_INSTALL'] = '/ha/do/op/install'
self.mock_paths.append('/ha/do/op/install/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/install/hadoop-streaming.jar')
def test_hadoop_install_beats_hadoop_mapred_home(self):
os.environ['HADOOP_MAPRED_HOME'] = '/ha/do/op/mapred-home'
self.mock_paths.append('/ha/do/op/mapred-home/hadoop-streaming.jar')
self.test_hadoop_install()
def test_hadoop_mapred_home(self):
os.environ['HADOOP_MAPRED_HOME'] = '/ha/do/op/mapred-home'
self.mock_paths.append('/ha/do/op/mapred-home/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/mapred-home/hadoop-streaming.jar')
def test_hadoop_mapred_home_beats_infer_from_hadoop_bin(self):
self.runner = HadoopJobRunner(
hadoop_bin=['/ha/do/op/bin-parent/bin/hadoop'])
self.mock_paths.append('/ha/do/op/bin-parent/hadoop-streaming.jar')
self.test_hadoop_mapred_home()
# infer from hadoop_bin
def test_infer_from_hadoop_bin_parent_dir(self):
self.runner = HadoopJobRunner(
hadoop_bin=['/ha/do/op/bin-parent/bin/hadoop'])
self.mock_paths.append('/ha/do/op/bin-parent/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/bin-parent/hadoop-streaming.jar')
def test_hadoop_bin_beats_hadoop_anything_home(self):
os.environ['HADOOP_ANYTHING_HOME'] = '/ha/do/op/anything-home'
self.mock_paths.append('/ha/do/op/anything-home/hadoop-streaming.jar')
self.test_infer_from_hadoop_bin_parent_dir()
def test_dont_infer_from_bin_hadoop(self):
self.runner = HadoopJobRunner(hadoop_bin=['/bin/hadoop'])
self.mock_paths.append('/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(), None)
def test_dont_infer_from_usr_bin_hadoop(self):
self.runner = HadoopJobRunner(hadoop_bin=['/usr/bin/hadoop'])
self.mock_paths.append('/usr/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(), None)
def test_dont_infer_from_usr_local_bin_hadoop(self):
self.runner = HadoopJobRunner(hadoop_bin=['/usr/local/bin/hadoop'])
self.mock_paths.append('/usr/local/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(), None)
def test_infer_from_hadoop_bin_realpath(self):
with patch('posixpath.realpath', return_value='/ha/do/op/bin'):
self.runner = HadoopJobRunner(hadoop_bin=['/usr/bin/hadoop'])
self.mock_paths.append('/ha/do/op/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/hadoop-streaming.jar')
# tests of fallback environment variables ($HADOOP_*_HOME)
def test_hadoop_anything_home(self):
os.environ['HADOOP_WHATEVER_HOME'] = '/ha/do/op/whatever-home'
self.mock_paths.append('/ha/do/op/whatever-home/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/whatever-home/hadoop-streaming.jar')
# $HADOOP_ANYTHING_HOME comes before $HADOOP_WHATEVER_HOME
os.environ['HADOOP_ANYTHING_HOME'] = '/ha/do/op/anything-home'
self.mock_paths.append('/ha/do/op/anything-home/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/anything-home/hadoop-streaming.jar')
def test_hadoop_anything_home_beats_hard_coded_paths(self):
self.mock_paths.append('/home/hadoop/contrib/hadoop-streaming.jar')
self.mock_paths.append(
'/usr/lib/hadoop-mapreduce/hadoop-streaming.jar')
self.test_hadoop_anything_home()
# hard-coded paths (for Hadoop inside EMR)
def test_hard_coded_emr_paths(self):
self.mock_paths.append(
'/usr/lib/hadoop-mapreduce/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/usr/lib/hadoop-mapreduce/hadoop-streaming.jar')
# /home/hadoop/contrib takes precedence
self.mock_paths.append('/home/hadoop/contrib/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/home/hadoop/contrib/hadoop-streaming.jar')
# invalid environment variables
def test_other_environment_variable(self):
os.environ['HADOOP_YARN_MRJOB_DIR'] = '/ha/do/op/yarn-mrjob-dir'
self.mock_paths.append(
'/ha/do/op/yarn-mrjob-dir/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(), None)
# alternate jar names and paths
def test_subdirs(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
self.mock_paths.append('/ha/do/op/contrib/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/contrib/hadoop-streaming.jar')
def test_hadoop_streaming_jar_name_with_version(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
self.mock_paths.append('/ha/do/op/hadoop-streaming-2.6.0-amzn-0.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/hadoop-streaming-2.6.0-amzn-0.jar')
def test_skip_hadoop_streaming_source_jar(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
# Googled it; it really is named *-sources.jar, not *-source.jar
self.mock_paths.append(
'/ha/do/op/hadoop-streaming-2.0.0-mr1-cdh4.3.1-sources.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(), None)
# multiple matching jars in same directory
def test_pick_shortest_name(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
self.mock_paths.append('/ha/do/op/hadoop-streaming-1.0.3.jar')
self.mock_paths.append('/ha/do/op/hadoop-streaming.jar')
# hadoop-streaming-1.0.3.jar comes first in alphabetical order
self.assertEqual(sorted(self.mock_paths), self.mock_paths)
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/hadoop-streaming.jar')
def test_pick_shallowest_subpath(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
self.mock_paths.append('/ha/do/op/hadoop-streaming-1.0.3.jar')
self.mock_paths.append('/ha/do/op/old/hadoop-streaming.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/hadoop-streaming-1.0.3.jar')
def test_fall_back_to_alphabetical_order(self):
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
self.mock_paths.append('/ha/do/op/hadoop-streaming-a.jar')
self.mock_paths.append('/ha/do/op/hadoop-streaming-b.jar')
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/hadoop-streaming-a.jar')
# sanity-check that directory order overrides path sort order
def test_directory_order_overrides_path_sort_order(self):
os.environ['HADOOP_HOME'] = '/ha/do/op/a'
os.environ['HADOOP_PREFIX'] = '/ha/do/op/b'
self.mock_paths.append('/ha/do/op/a/hadoop-streaming-a.jar')
self.mock_paths.append('/ha/do/op/b/hadoop-streaming-b.jar')
# $HADOOP_PREFIX takes precendence over $HADOOP_HOME, so sort
# order doesn't matter
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/b/hadoop-streaming-b.jar')
# now search in parent dir (/ha/do/op) to invoke sort order
os.environ['HADOOP_PREFIX'] = '/ha/do/op'
self.assertEqual(self.runner._find_hadoop_streaming_jar(),
'/ha/do/op/a/hadoop-streaming-a.jar')
class HadoopLogDirsTestCase(SandboxedTestCase):
def setUp(self):
super(HadoopLogDirsTestCase, self).setUp()
os.environ.clear()
self.mock_hadoop_version = '2.7.0'
# the result of _hadoop_dir(). This handles non-log-specific
# environment variables, such as $HADOOP_PREFIX, and also guesses
# based on the path of the Hadoop binary
self.mock_hadoop_dirs = []
def mock_get_hadoop_version():
return self.mock_hadoop_version
def mock_hadoop_dirs_method():
return (d for d in self.mock_hadoop_dirs)
self.start(patch('mrjob.hadoop.HadoopJobRunner.get_hadoop_version',
side_effect=mock_get_hadoop_version))
self.start(patch('mrjob.hadoop.HadoopJobRunner._hadoop_dirs',
side_effect=mock_hadoop_dirs_method))
self.runner = HadoopJobRunner()
def test_empty(self):
self.assertEqual(list(self.runner._hadoop_log_dirs()),
['hdfs:///tmp/hadoop-yarn/staging',
'/var/log/hadoop-yarn',
'/mnt/var/log/hadoop-yarn',
'/var/log/hadoop',
'/mnt/var/log/hadoop'])
def test_precedence(self):
os.environ['HADOOP_LOG_DIR'] = '/path/to/hadoop-log-dir'
os.environ['YARN_LOG_DIR'] = '/path/to/yarn-log-dir'
self.mock_hadoop_dirs = ['/path/to/hadoop-prefix',
'/path/to/hadoop-home']
self.assertEqual(
list(self.runner._hadoop_log_dirs(output_dir='hdfs:///output/')),
['/path/to/hadoop-log-dir',
'/path/to/yarn-log-dir',
'hdfs:///tmp/hadoop-yarn/staging',
'hdfs:///output/_logs',
'/path/to/hadoop-prefix/logs',
'/path/to/hadoop-home/logs',
'/var/log/hadoop-yarn',
'/mnt/var/log/hadoop-yarn',
'/var/log/hadoop',
'/mnt/var/log/hadoop'])
def test_hadoop_log_dirs_opt(self):
self.runner = HadoopJobRunner(hadoop_log_dirs=['/logs1', '/logs2'])
os.environ['HADOOP_LOG_DIR'] = '/path/to/hadoop-log-dir'
# setting hadoop_log_dirs short-circuits automatic discovery of logs
self.assertEqual(
list(self.runner._hadoop_log_dirs()),
['/logs1', '/logs2'])
def test_need_yarn_for_yarn_log_dir_and_hdfs_log_dir(self):
os.environ['YARN_LOG_DIR'] = '/path/to/yarn-log-dir'
self.mock_hadoop_version = '2.0.0'
self.assertEqual(list(self.runner._hadoop_log_dirs()),
['/path/to/yarn-log-dir',
'hdfs:///tmp/hadoop-yarn/staging',
'/var/log/hadoop-yarn',
'/mnt/var/log/hadoop-yarn',
'/var/log/hadoop',
'/mnt/var/log/hadoop'])
self.mock_hadoop_version = '1.0.3'
self.assertEqual(list(self.runner._hadoop_log_dirs()),
['/var/log/hadoop',
'/mnt/var/log/hadoop'])
class StreamingLogDirsTestCase(SandboxedTestCase):
# tests for the _stream_*_log_dirs() methods, mocking out
# _hadoop_log_dirs(), which is tested above
def setUp(self):
super(StreamingLogDirsTestCase, self).setUp()
self.log = self.start(patch('mrjob.hadoop.log'))
self.runner = HadoopJobRunner()
self.runner._hadoop_log_dirs = Mock(return_value=[])
self.runner.fs.exists = Mock(return_value=True)
self.log.reset_mock() # ignore logging from HadoopJobRunner init
class StreamHistoryLogDirsTestCase(StreamingLogDirsTestCase):
def test_empty(self):
results = self.runner._stream_history_log_dirs()
self.assertFalse(self.log.info.called)
self.assertRaises(StopIteration, next, results)
def test_basic(self):
self.runner._hadoop_log_dirs.return_value = [
'/mnt/var/logs/hadoop', 'hdfs:///logs']
results = self.runner._stream_history_log_dirs()
self.assertFalse(self.log.info.called)
self.assertEqual(next(results), ['/mnt/var/logs/hadoop'])
self.assertEqual(self.log.info.call_count, 1)
self.assertIn('/mnt/var/logs/hadoop', self.log.info.call_args[0][0])
self.assertEqual(next(results), ['hdfs:///logs'])
self.assertEqual(self.log.info.call_count, 2)
self.assertIn('hdfs:///logs', self.log.info.call_args[0][0])
self.assertRaises(StopIteration, next, results)
def test_output_dir(self):
output_dir = 'hdfs:///path/to/output'
self.runner._hadoop_log_dirs.return_value = [output_dir]
results = self.runner._stream_history_log_dirs(output_dir=output_dir)
self.assertEqual(next(results), [output_dir])
self.runner._hadoop_log_dirs.assert_called_with(output_dir=output_dir)
self.assertRaises(StopIteration, next, results)
def test_fs_exists(self):
self.runner._hadoop_log_dirs.return_value = [
'/mnt/var/logs/hadoop', 'hdfs:///logs']
self.runner.fs.exists.return_value = False
results = self.runner._stream_history_log_dirs()
self.assertRaises(StopIteration, next, results)
def test_io_error_from_fs_exists(self):
self.runner._hadoop_log_dirs.return_value = [
'hdfs:///tmp/output/_logs',
]
self.runner.fs.exists.side_effect = IOError
results = self.runner._stream_history_log_dirs()
self.assertRaises(StopIteration, next, results)
def test_no_read_logs(self):
self.runner._opts['read_logs'] = False
self.runner._hadoop_log_dirs.return_value = [
'/mnt/var/logs/hadoop', 'hdfs:///logs']
results = self.runner._stream_history_log_dirs()
self.assertRaises(StopIteration, next, results)
self.assertFalse(self.log.info.called)
self.assertFalse(self.runner._hadoop_log_dirs.called)
class StreamTaskLogDirsTestCase(StreamingLogDirsTestCase):
def test_empty(self):
results = self.runner._stream_task_log_dirs()
self.assertFalse(self.log.info.called)
self.assertRaises(StopIteration, next, results)
def test_basic(self):
self.runner._hadoop_log_dirs.return_value = [
'/mnt/var/logs/hadoop', 'hdfs:///logs']
results = self.runner._stream_task_log_dirs()
self.assertFalse(self.log.info.called)
self.assertEqual(next(results), ['/mnt/var/logs/hadoop/userlogs'])
self.assertEqual(self.log.info.call_count, 1)
self.assertIn('/mnt/var/logs/hadoop', self.log.info.call_args[0][0])
self.assertEqual(next(results), ['hdfs:///logs/userlogs'])
self.assertEqual(self.log.info.call_count, 2)
self.assertIn('hdfs:///logs/userlogs', self.log.info.call_args[0][0])
self.assertRaises(StopIteration, next, results)
def test_output_dir(self):
output_dir = 'hdfs:///path/to/output'
self.runner._hadoop_log_dirs.return_value = [output_dir]
results = self.runner._stream_task_log_dirs(output_dir=output_dir)
self.assertEqual(next(results), [output_dir + '/userlogs'])
self.runner._hadoop_log_dirs.assert_called_with(output_dir=output_dir)
self.assertRaises(StopIteration, next, results)
def test_application_id(self):
self.runner._hadoop_log_dirs.return_value = ['hdfs:///logs']
results = self.runner._stream_task_log_dirs(application_id='app_1')
self.assertEqual(next(results), ['hdfs:///logs/userlogs/app_1'])
def test_fs_exists(self):
self.runner._hadoop_log_dirs.return_value = [
'/mnt/var/logs/hadoop', 'hdfs:///logs']
self.runner.fs.exists.return_value = False
results = self.runner._stream_task_log_dirs()
self.assertRaises(StopIteration, next, results)
def test_io_error_from_fs_exists(self):
self.runner._hadoop_log_dirs.return_value = [
'hdfs:///tmp/output/_logs',
]
self.runner.fs.exists.side_effect = IOError
results = self.runner._stream_task_log_dirs()
self.assertRaises(StopIteration, next, results)
def test_no_read_logs(self):
self.runner._opts['read_logs'] = False
self.runner._hadoop_log_dirs.return_value = [
'/mnt/var/logs/hadoop', 'hdfs:///logs']
results = self.runner._stream_task_log_dirs()
self.assertRaises(StopIteration, next, results)
self.assertFalse(self.log.info.called)
self.assertFalse(self.runner._hadoop_log_dirs.called)
class GetHadoopVersionTestCase(MockHadoopTestCase):
def test_get_hadoop_version(self):
runner = HadoopJobRunner()
self.assertEqual(runner.get_hadoop_version(), '1.2.0')
def test_missing_hadoop_version(self):
with patch.dict('os.environ', MOCK_HADOOP_VERSION=''):
runner = HadoopJobRunner()
self.assertRaises(Exception, runner.get_hadoop_version)
class HadoopJobRunnerEndToEndTestCase(MockHadoopTestCase):
def _test_end_to_end(self, args=()):
# read from STDIN, a local file, and a remote file
stdin = BytesIO(b'foo\nbar\n')
local_input_path = os.path.join(self.tmp_dir, 'input')
with open(local_input_path, 'w') as local_input_file:
local_input_file.write('bar\nqux\n')
input_to_upload = os.path.join(self.tmp_dir, 'remote_input')
with open(input_to_upload, 'w') as input_to_upload_file:
input_to_upload_file.write('foo\n')
remote_input_path = 'hdfs:///data/foo'
check_call([self.hadoop_bin,
'fs', '-put', input_to_upload, remote_input_path])
# add counters
add_mock_hadoop_counters({'foo': {'bar': 23}})
add_mock_hadoop_counters({'baz': {'qux': 42}})
# doesn't matter what the intermediate output is; just has to exist.
add_mock_hadoop_output([b''])
add_mock_hadoop_output([b'1\t"qux"\n2\t"bar"\n',
b'2\t"foo"\n5\tnull\n'])
mr_job = MRTwoStepJob([
'-r', 'hadoop', '-v',
'--no-conf', '--libjars', 'containsJars.jar',
'--hadoop-args=-verbose'] + list(args) +
['-', local_input_path, remote_input_path] +
['-D', 'x=y']
)
mr_job.sandbox(stdin=stdin)
local_tmp_dir = None
results = []
with mr_job.make_runner() as runner:
assert isinstance(runner, HadoopJobRunner)
runner.run()
results.extend(mr_job.parse_output(runner.cat_output()))
local_tmp_dir = runner._get_local_tmp_dir()
# make sure cleanup hasn't happened yet
assert os.path.exists(local_tmp_dir)
assert any(runner.fs.ls(runner.get_output_dir()))
# make sure we're writing to the correct path in HDFS
hdfs_root = get_mock_hdfs_root()
self.assertEqual(sorted(os.listdir(hdfs_root)), ['data', 'user'])
home_dir = os.path.join(hdfs_root, 'user', getpass.getuser())
self.assertEqual(os.listdir(home_dir), ['tmp'])
self.assertEqual(os.listdir(os.path.join(home_dir, 'tmp')),
['mrjob'])
self.assertEqual(runner._opts['hadoop_extra_args'],
['-verbose'])
# make sure mrjob.zip was uploaded
self.assertTrue(os.path.exists(runner._mrjob_zip_path))
self.assertIn(runner._mrjob_zip_path,
runner._upload_mgr.path_to_uri())
# make sure setup script exists, and that it adds mrjob.zip
# to PYTHONPATH
self.assertTrue(os.path.exists(runner._setup_wrapper_script_path))
self.assertIn(runner._setup_wrapper_script_path,
runner._working_dir_mgr.paths())
mrjob_zip_name = runner._working_dir_mgr.name(
'file', runner._mrjob_zip_path)
with open(runner._setup_wrapper_script_path) as wrapper:
self.assertTrue(any(
('export PYTHONPATH' in line and mrjob_zip_name in line)
for line in wrapper))
self.assertEqual(runner.counters(),
[{'foo': {'bar': 23}},
{'baz': {'qux': 42}}])
self.assertEqual(sorted(results),
[(1, 'qux'), (2, 'bar'), (2, 'foo'), (5, None)])
# make sure we called hadoop the way we expected
hadoop_cmd_args = get_mock_hadoop_cmd_args()
jar_cmd_args = [cmd_args for cmd_args in hadoop_cmd_args
if cmd_args[:1] == ['jar']]
self.assertEqual(len(jar_cmd_args), 2)
step_0_args, step_1_args = jar_cmd_args
# check input/output format
self.assertIn('-inputformat', step_0_args)
self.assertNotIn('-outputformat', step_0_args)
self.assertNotIn('-inputformat', step_1_args)
self.assertIn('-outputformat', step_1_args)
# make sure extra arg (-verbose) comes before mapper
for args in (step_0_args, step_1_args):
self.assertIn('-verbose', args)
self.assertIn('-mapper', args)
self.assertLess(args.index('-verbose'), args.index('-mapper'))
# make sure -libjars is set and comes before mapper
for args in (step_0_args, step_1_args):
self.assertIn('-libjars', args)
self.assertIn('containsJars.jar', args)
self.assertIn('-mapper', args)
self.assertLess(args.index('-libjars'), args.index('-mapper'))
# make sure -D (jobconf) made it through
self.assertIn('-D', step_0_args)
self.assertIn('x=y', step_0_args)
self.assertIn('-D', step_1_args)
# job overrides jobconf in step 1
self.assertIn('x=z', step_1_args)
# make sure cleanup happens
assert not os.path.exists(local_tmp_dir)
assert not any(runner.fs.ls(runner.get_output_dir()))
def test_end_to_end(self):
self._test_end_to_end()
def test_end_to_end_with_explicit_hadoop_bin(self):
self._test_end_to_end(['--hadoop-bin', self.hadoop_bin])
def test_end_to_end_without_pty_fork(self):
with patch.object(pty, 'fork', side_effect=OSError()):
self._test_end_to_end()
def test_end_to_end_with_disabled_input_path_check(self):
self._test_end_to_end(['--no-check-input-paths'])
def test_end_to_end_with_hadoop_2_0(self):
with patch.dict('os.environ', MOCK_HADOOP_VERSION='2.0.0'):
self._test_end_to_end()
class StreamingArgsTestCase(EmptyMrjobConfTestCase):
MRJOB_CONF_CONTENTS = {'runners': {'hadoop': {
'hadoop_streaming_jar': 'binks.jar.jar',
}}}
BASIC_HADOOP_ARGS = [
'hadoop',
'jar', '<streaming jar>',
'<upload args>',
]
BASIC_JOB_ARGS = [
'<hadoop args for step>',
'-input', '<hdfs step input files>',
'-output', '<hdfs step output dir>',
]
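    # the '<...>' placeholders are exactly what the patched helpers in setUp() return,
    # so these lists only pin down the overall ordering of the streaming command line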
def setUp(self):
super(StreamingArgsTestCase, self).setUp()
self.runner = HadoopJobRunner(
hadoop_bin='hadoop', hadoop_streaming_jar='<streaming jar>',
mr_job_script='my_job.py', stdin=BytesIO())
self.runner._add_job_files_for_upload()
self.start(patch.object(self.runner, '_upload_args',
return_value=['<upload args>']))
self.start(patch.object(self.runner, '_hadoop_args_for_step',
return_value=['<hadoop args for step>']))
self.start(patch.object(self.runner, '_step_input_uris',
return_value=['<hdfs step input files>']))
self.start(patch.object(self.runner, '_step_output_uri',
return_value='<hdfs step output dir>'))
self.start(patch.object(HadoopFilesystem, 'get_hadoop_version',
return_value='2.7.1'))
self.runner._script_path = 'my_job.py'
    def _assert_streaming_step(self, step, args):
        # helper (not exercised by the tests below): expected args are the basic
        # hadoop/job skeleton plus whatever the step itself adds
        self.runner._steps = [step]
        self.assertEqual(
            self.runner._args_for_streaming_step(0),
            self.BASIC_HADOOP_ARGS + self.BASIC_JOB_ARGS + args)
def test_basic_mapper(self):
self.runner._steps = [
{
'type': 'streaming',
'mapper': {
'type': 'script',
},
},
]
self.assertEqual(
self.runner._args_for_streaming_step(0),
(self.BASIC_HADOOP_ARGS +
['-D', 'mapreduce.job.reduces=0'] +
self.BASIC_JOB_ARGS + [
'-mapper',
PYTHON_BIN + ' my_job.py --step-num=0 --mapper']))
def test_basic_mapper_pre_yarn(self):
# use a different jobconf (-D) on pre-YARN
self.start(patch.object(HadoopFilesystem, 'get_hadoop_version',
return_value='1.0.3'))
self.runner._steps = [
{
'type': 'streaming',
'mapper': {
'type': 'script',
},
},
]
self.assertEqual(
self.runner._args_for_streaming_step(0),
(self.BASIC_HADOOP_ARGS +
['-D', 'mapred.reduce.tasks=0'] +
self.BASIC_JOB_ARGS + [
'-mapper',
PYTHON_BIN + ' my_job.py --step-num=0 --mapper']))
def test_basic_reducer(self):
self.runner._steps = [
{
'type': 'streaming',
'reducer': {
'type': 'script',
},
},
]
self.assertEqual(
self.runner._args_for_streaming_step(0),
(self.BASIC_HADOOP_ARGS + self.BASIC_JOB_ARGS + [
'-mapper',
'cat',
'-reducer',
PYTHON_BIN + ' my_job.py --step-num=0 --reducer']))
def test_pre_filters(self):
self.runner._steps = [
{
'type': 'streaming',
'mapper': {
'type': 'script',
'pre_filter': 'grep anything',
},
'combiner': {
'type': 'script',
'pre_filter': 'grep nothing',
},
'reducer': {
'type': 'script',
'pre_filter': 'grep something',
},
},
]
self.assertEqual(
self.runner._args_for_streaming_step(0),
(self.BASIC_HADOOP_ARGS + self.BASIC_JOB_ARGS + [
'-mapper',
"/bin/sh -ex -c 'grep anything | " + PYTHON_BIN +
" my_job.py --step-num=0 --mapper'",
'-combiner',
"/bin/sh -ex -c 'grep nothing | " + PYTHON_BIN +
" my_job.py --step-num=0 --combiner'",
'-reducer',
"/bin/sh -ex -c 'grep something | " + PYTHON_BIN +
" my_job.py --step-num=0 --reducer'"]))
def test_pre_filter_escaping(self):
# ESCAPE ALL THE THINGS!!!
self.runner._steps = [
{
'type': 'streaming',
'mapper': {
'type': 'script',
'pre_filter': "bash -c 'grep '\\''anything'\\'''",
},
},
]
self.assertEqual(
self.runner._args_for_streaming_step(0),
(self.BASIC_HADOOP_ARGS +
['-D', 'mapreduce.job.reduces=0'] +
self.BASIC_JOB_ARGS + [
'-mapper',
"/bin/sh -ex -c 'bash -c '\\''grep"
" '\\''\\'\\'''\\''anything'\\''\\'\\'''\\'''\\'' | " +
PYTHON_BIN +
" my_job.py --step-num=0 --mapper'"]))
class ArgsForJarStepTestCase(MockHadoopTestCase):
def test_local_jar(self):
fake_jar = self.makefile('fake.jar')
job = MRJustAJar(['-r', 'hadoop', '--jar', fake_jar])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._args_for_jar_step(0),
runner.get_hadoop_bin() +
['jar', fake_jar])
def test_hdfs_jar_uri(self):
# this could change, but for now, we pass URIs straight through
mock_hdfs_jar = os.path.join(get_mock_hdfs_root(), 'fake.jar')
open(mock_hdfs_jar, 'w').close()
jar_uri = 'hdfs:///fake.jar'
job = MRJustAJar(['-r', 'hadoop', '--jar', jar_uri])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._args_for_jar_step(0),
runner.get_hadoop_bin() +
['jar', jar_uri])
def test_no_generic_args_by_default(self):
# -D and --libjars are ignored unless you use GENERIC_ARGS. See #1863
fake_jar = self.makefile('fake.jar')
fake_libjar = self.makefile('fake_lib.jar')
job = MRJustAJar(
['-r', 'hadoop', '--jar', fake_jar,
'-D', 'foo=bar', '--libjars', fake_libjar])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._args_for_jar_step(0),
runner.get_hadoop_bin() +
['jar', fake_jar])
def test_generic_args_interpolation(self):
fake_jar = self.makefile('fake.jar')
fake_libjar = self.makefile('fake_lib.jar')
job = MRJarWithGenericArgs(
['-r', 'hadoop', '--jar', fake_jar,
'-D', 'foo=bar', '--libjars', fake_libjar])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._args_for_jar_step(0),
runner.get_hadoop_bin() +
['jar', fake_jar,
'before',
'-libjars', fake_libjar, '-D', 'foo=bar',
'after']
)
def test_input_output_interpolation(self):
# TODO: rewrite this to just check the step args (see #1482)
fake_jar = os.path.join(self.tmp_dir, 'fake.jar')
open(fake_jar, 'w').close()
input1 = os.path.join(self.tmp_dir, 'input1')
open(input1, 'w').close()
input2 = os.path.join(self.tmp_dir, 'input2')
open(input2, 'w').close()
job = MRJarAndStreaming(
['-r', 'hadoop', '--jar', fake_jar, input1, input2])
job.sandbox()
add_mock_hadoop_output([b'']) # need this for streaming step
with job.make_runner() as runner:
runner.run()
hadoop_cmd_args = get_mock_hadoop_cmd_args()
hadoop_jar_cmd_args = [args for args in hadoop_cmd_args if
args and args[0] == 'jar']
self.assertEqual(len(hadoop_jar_cmd_args), 2)
jar_args, streaming_args = hadoop_jar_cmd_args
self.assertEqual(len(jar_args), 5)
self.assertEqual(jar_args[0], 'jar')
self.assertEqual(jar_args[1], fake_jar)
self.assertEqual(jar_args[2], 'stuff')
# check input is interpolated
input_arg = ','.join(
runner._upload_mgr.uri(path) for path in (input1, input2))
self.assertEqual(jar_args[3], input_arg)
# check output of jar is input of next step
jar_output_arg = jar_args[4]
streaming_input_arg = streaming_args[
streaming_args.index('-input') + 1]
self.assertEqual(jar_output_arg, streaming_input_arg)
class SparkStepArgsTestCase(SandboxedTestCase):
MRJOB_CONF_CONTENTS = dict(runners=dict(hadoop=dict(
spark_submit_bin='spark-submit')))
def setUp(self):
super(SparkStepArgsTestCase, self).setUp()
# _spark_submit_args() is tested elsewhere
self.start(patch(
'mrjob.bin.MRJobBinRunner._spark_submit_args',
return_value=['<spark submit args>']))
def add_files_for_upload(self, runner):
# _args_for_step() needs both of these
runner._add_input_files_for_upload()
runner._add_job_files_for_upload()
def test_spark_step(self):
job = MRNullSpark([
'-r', 'hadoop',
])
job.sandbox()
with job.make_runner() as runner:
self.add_files_for_upload(runner)
self.assertEqual(runner._args_for_step(0), [
'spark-submit',
'<spark submit args>',
runner._script_path,
'--step-num=0',
'--spark',
','.join(runner._step_input_uris(0)),
runner._step_output_uri(0)
])
def test_spark_streaming_step(self):
job = MRSparkScript([
'-r', 'hadoop',
'--script', '/path/to/spark_script.py',
'--script-arg', 'foo',
'--script-arg', mrjob.step.OUTPUT,
'--script-arg', mrjob.step.INPUT,
])
job.sandbox()
with job.make_runner() as runner:
self.add_files_for_upload(runner)
self.assertEqual(runner._args_for_step(0), [
'spark-submit',
'<spark submit args>',
'/path/to/spark_script.py',
'foo',
runner._step_output_uri(0),
','.join(runner._step_input_uris(0)),
])
class EnvForStepTestCase(MockHadoopTestCase):
def setUp(self):
super(EnvForStepTestCase, self).setUp()
os.environ.clear() # for less noisy test failures
def test_streaming_step(self):
job = MRTwoStepJob(['-r', 'hadoop', '--cmdenv', 'FOO=bar'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._env_for_step(0),
dict(os.environ)
)
def test_jar_step(self):
job = MRJustAJar(['-r', 'hadoop', '--cmdenv', 'FOO=bar'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._env_for_step(0),
dict(os.environ)
)
def test_spark_step(self):
job = MRNullSpark(['-r', 'hadoop', '--cmdenv', 'FOO=bar'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._env_for_step(0),
combine_dicts(os.environ,
dict(FOO='bar', PYSPARK_PYTHON=PYTHON_BIN))
)
def test_spark_jar_step(self):
job = MRSparkJar(['-r', 'hadoop', '--cmdenv', 'FOO=bar'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._env_for_step(0),
combine_dicts(os.environ, dict(FOO='bar'))
)
def test_spark_script_step(self):
job = MRSparkScript(['-r', 'hadoop', '--cmdenv', 'FOO=bar'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(
runner._env_for_step(0),
combine_dicts(os.environ,
dict(FOO='bar', PYSPARK_PYTHON=PYTHON_BIN))
)
class RunJobInHadoopUsesEnvTestCase(MockHadoopTestCase):
def setUp(self):
super(RunJobInHadoopUsesEnvTestCase, self).setUp()
self.mock_args_for_step = self.start(patch(
'mrjob.hadoop.HadoopJobRunner._args_for_step',
return_value=['args', 'for', 'step']))
self.mock_env_for_step = self.start(patch(
'mrjob.hadoop.HadoopJobRunner._env_for_step',
return_value=dict(FOO='bar', BAZ='qux')))
self.mock_pty_fork = self.start(patch.object(
pty, 'fork', return_value=(0, None)))
# end test once we invoke Popen or execvpe()
self.mock_Popen = self.start(patch(
'mrjob.hadoop.Popen', side_effect=StopIteration))
self.mock_execvpe = self.start(patch(
'os.execvpe', side_effect=StopIteration))
# don't actually hard-exit the child, since we're not actually forking
self.mock_exit = self.start(patch('os._exit'))
def test_with_pty(self):
job = MRWordCount(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.assertRaises(StopIteration, runner._run_job_in_hadoop)
self.mock_execvpe.assert_called_once_with(
'args', ['args', 'for', 'step'], dict(FOO='bar', BAZ='qux'))
self.assertFalse(self.mock_Popen.called)
def test_without_pty(self):
self.mock_pty_fork.side_effect = OSError
job = MRWordCount(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.assertRaises(StopIteration, runner._run_job_in_hadoop)
self.mock_Popen.assert_called_once_with(
['args', 'for', 'step'],
stdout=PIPE, stderr=PIPE, env=dict(FOO='bar', BAZ='qux'))
self.assertFalse(self.mock_execvpe.called)
@skipIf(not (pty and hasattr(pty, 'fork')), 'no pty.fork()')
class BadHadoopBinAfterFork(MockHadoopTestCase):
# test what happens if os.execvpe() fails (see #2024)
# can't just set --hadoop-bin because this would break checking
# Hadoop version and uploading files. Instead, patch _args_for_step()
def patch_args_for_step(self, runner, bad_hadoop_bin):
real_args_for_step = runner._args_for_step
def args_for_step(*args, **kwargs):
args = real_args_for_step(*args, **kwargs)
return [bad_hadoop_bin] + args[1:]
self.start(patch.object(runner, '_args_for_step', args_for_step))
def test_no_such_file(self):
missing_hadoop_bin = os.path.join(self.tmp_dir, 'no-hadoop-here')
job = MRTwoStepJob(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.patch_args_for_step(runner, missing_hadoop_bin)
self.assertRaises(StepFailedException, runner.run)
def test_permissions_error(self):
nonexecutable_hadoop_bin = self.makefile('not-really-hadoop')
job = MRTwoStepJob(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.patch_args_for_step(runner, nonexecutable_hadoop_bin)
self.assertRaises(StepFailedException, runner.run)
def test_non_oserror_exception(self):
self.start(patch('os.execvpe', side_effect=KeyboardInterrupt))
job = MRTwoStepJob(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.assertRaises(StepFailedException, runner.run)
class SparkPyFilesTestCase(MockHadoopTestCase):
def test_eggs(self):
egg1_path = self.makefile('dragon.egg')
egg2_path = self.makefile('horton.egg')
job = MRNullSpark([
'-r', 'hadoop',
'--py-files', '%s,%s' % (egg1_path, egg2_path)])
job.sandbox()
with job.make_runner() as runner:
runner._create_setup_wrapper_scripts()
runner._add_job_files_for_upload()
self.assertEqual(
runner._py_files(),
[egg1_path, egg2_path, runner._create_mrjob_zip()]
)
# pass the URI of the *uploaded* py_files to Spark
self.assertIn(egg1_path, runner._upload_mgr.path_to_uri())
self.assertIn(egg2_path, runner._upload_mgr.path_to_uri())
egg_uris = ','.join(runner._upload_mgr.uri(path)
for path in runner._py_files())
self.assertIn(egg_uris, runner._spark_submit_args(0))
class SetupLineEncodingTestCase(MockHadoopTestCase):
def test_setup_wrapper_script_uses_unix_line_endings(self):
job = MRTwoStepJob(['-r', 'hadoop', '--setup', 'true'])
job.sandbox()
add_mock_hadoop_output([b''])
add_mock_hadoop_output([b''])
# tests #1071. Unfortunately, we mostly run these tests on machines
# that use unix line endings anyway. So monitor open() instead
with patch(
'mrjob.runner.open', create=True, side_effect=open) as m_open:
with job.make_runner() as runner:
runner.run()
self.assertIn(
call(runner._setup_wrapper_script_path, 'wb'),
m_open.mock_calls)
class PickErrorTestCase(MockHadoopTestCase):
# integration tests for _pick_error()
def setUp(self):
super(PickErrorTestCase, self).setUp()
os.environ['MOCK_HADOOP_VERSION'] = '2.7.0'
self.runner = HadoopJobRunner()
def test_empty(self):
self.assertEqual(self.runner._pick_error({}, 'streaming'), None)
def test_yarn_python_exception(self):
APPLICATION_ID = 'application_1450486922681_0004'
CONTAINER_ID = 'container_1450486922681_0005_01_000003'
JOB_ID = 'job_1450486922681_0004'
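        # mimic YARN's log layout: $HADOOP_HOME/logs/userlogs/<application id>/<container id>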
log_subdir = os.path.join(
os.environ['HADOOP_HOME'], 'logs',
'userlogs', APPLICATION_ID, CONTAINER_ID)
os.makedirs(log_subdir)
syslog_path = os.path.join(log_subdir, 'syslog')
with open(syslog_path, 'w') as syslog:
syslog.write(
'2015-12-21 14:06:17,707 INFO [main]'
' org.apache.hadoop.mapred.MapTask: Processing split:'
' hdfs://e4270474c8ee:9000/user/root/tmp/mrjob'
'/mr_boom.root.20151221.190511.059097/files'
'/bootstrap.sh:0+335\n')
syslog.write(
'2015-12-21 14:06:18,538 WARN [main]'
' org.apache.hadoop.mapred.YarnChild: Exception running child'
' : java.lang.RuntimeException:'
' PipeMapRed.waitOutputThreads(): subprocess failed with'
' code 1\n')
syslog.write(
' at org.apache.hadoop.streaming.PipeMapRed'
'.waitOutputThreads(PipeMapRed.java:322)\n')
stderr_path = os.path.join(log_subdir, 'stderr')
with open(stderr_path, 'w') as stderr:
            stderr.write('+ python mr_boom.py --mapper\n')
stderr.write('Traceback (most recent call last):\n')
stderr.write(' File "mr_boom.py", line 10, in <module>\n')
stderr.write(' MRBoom.run()\n')
stderr.write('Exception: BOOM\n')
error = self.runner._pick_error(
dict(step=dict(application_id=APPLICATION_ID, job_id=JOB_ID)),
'streaming',
)
self.assertIsNotNone(error)
self.assertEqual(error['hadoop_error']['path'], syslog_path)
self.assertEqual(error['task_error']['path'], stderr_path)
class HadoopExtraArgsTestCase(MockHadoopTestCase):
# moved from tests.test_runner.HadoopArgsForStepTestCase because
# hadoop_extra_args isn't defined in the base runner
RUNNER = 'hadoop'
def test_hadoop_extra_args(self):
# hadoop_extra_args doesn't exist in default runner
job = MRWordCount(['-r', self.RUNNER, '--hadoop-args=-foo'])
with job.make_runner() as runner:
self.assertEqual(runner._hadoop_args_for_step(0), ['-foo'])
def test_hadoop_extra_args_comes_after_jobconf(self):
job = MRWordCount(
['-r', self.RUNNER,
'--cmdenv', 'FOO=bar',
'--hadoop-args=-libjars qux.jar',
'-D', 'baz=qux'])
job.HADOOP_INPUT_FORMAT = 'FooInputFormat'
job.HADOOP_OUTPUT_FORMAT = 'BarOutputFormat'
with job.make_runner() as runner:
hadoop_args = runner._hadoop_args_for_step(0)
self.assertEqual(
hadoop_args[:4],
['-D', 'baz=qux', '-libjars', 'qux.jar'])
self.assertEqual(len(hadoop_args), 10)
class LibjarsTestCase(MockHadoopTestCase):
def add_files_for_upload(self, runner):
# need both of these for _args_for_streaming_step() to work
runner._add_input_files_for_upload()
runner._add_job_files_for_upload()
def test_empty(self):
job = MRWordCount(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.add_files_for_upload(runner)
args = runner._args_for_streaming_step(0)
self.assertNotIn('-libjars', args)
def test_one_jar(self):
job = MRWordCount([
'-r', 'hadoop',
'--libjars', '/path/to/a.jar',
])
job.sandbox()
with job.make_runner() as runner:
self.add_files_for_upload(runner)
args = runner._args_for_streaming_step(0)
self.assertIn('-libjars', args)
self.assertIn('/path/to/a.jar', args)
def test_two_jars(self):
job = MRWordCount([
'-r', 'hadoop',
'--libjars', '/path/to/a.jar',
'--libjars', '/path/to/b.jar',
])
job.sandbox()
with job.make_runner() as runner:
self.add_files_for_upload(runner)
args = runner._args_for_streaming_step(0)
self.assertIn('-libjars', args)
self.assertIn('/path/to/a.jar,/path/to/b.jar', args)
class FindBinariesAndJARsTestCase(SandboxedTestCase):
def setUp(self):
super(FindBinariesAndJARsTestCase, self).setUp()
self.get_hadoop_version = self.start(patch(
'mrjob.hadoop.HadoopJobRunner.get_hadoop_version'))
self.get_hadoop_streaming_jar = self.start(patch(
'mrjob.hadoop.HadoopJobRunner.get_hadoop_streaming_jar'))
self.get_spark_submit_bin = self.start(patch(
'mrjob.hadoop.HadoopJobRunner.get_spark_submit_bin'))
def test_always_call_get_hadoop_version(self):
runner = HadoopJobRunner()
runner._find_binaries_and_jars()
self.assertTrue(self.get_hadoop_version.called)
self.assertFalse(self.get_hadoop_streaming_jar.called)
self.assertFalse(self.get_spark_submit_bin.called)
def test_streaming_steps(self):
job = MRTwoStepJob(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
runner._find_binaries_and_jars()
self.assertTrue(self.get_hadoop_version.called)
self.assertTrue(self.get_hadoop_streaming_jar.called)
self.assertFalse(self.get_spark_submit_bin.called)
def test_spark_steps(self):
job = MRNullSpark(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
runner._find_binaries_and_jars()
self.assertTrue(self.get_hadoop_version.called)
self.assertFalse(self.get_hadoop_streaming_jar.called)
self.assertTrue(self.get_spark_submit_bin.called)
def test_streaming_and_spark(self):
job = MRStreamingAndSpark(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
runner._find_binaries_and_jars()
self.assertTrue(self.get_hadoop_version.called)
self.assertTrue(self.get_hadoop_streaming_jar.called)
self.assertTrue(self.get_spark_submit_bin.called)
class SparkMasterAndDeployModeTestCase(MockHadoopTestCase):
def test_default(self):
mr_job = MRNullSpark(['-r', 'hadoop'])
mr_job.sandbox()
with mr_job.make_runner() as runner:
runner._add_job_files_for_upload()
self.assertEqual(
runner._spark_submit_args(0)[4:8],
['--master', 'yarn', '--deploy-mode', 'client']
)
def test_spark_master_opt(self):
# these are hard-coded and always the same
mr_job = MRNullSpark(['-r', 'hadoop', '--spark-master', 'local'])
mr_job.sandbox()
with mr_job.make_runner() as runner:
runner._add_job_files_for_upload()
self.assertEqual(
runner._spark_submit_args(0)[2:6],
['--master', 'local', '--deploy-mode', 'client']
)
def test_spark_deploy_mode_opt(self):
# these are hard-coded and always the same
mr_job = MRNullSpark(['-r', 'hadoop',
'--spark-deploy-mode', 'cluster'])
mr_job.sandbox()
with mr_job.make_runner() as runner:
runner._add_job_files_for_upload()
self.assertEqual(
runner._spark_submit_args(0)[4:8],
['--master', 'yarn', '--deploy-mode', 'cluster']
)
class GetHadoopBinTestCase(MockHadoopTestCase):
# mostly exists to test --hadoop-bin ''
def test_default(self):
job = MRTwoStepJob(['-r', 'hadoop'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(runner.get_hadoop_bin(),
[self.hadoop_bin])
def test_custom_hadoop_bin(self):
job = MRTwoStepJob(['-r', 'hadoop', '--hadoop-bin', 'hadoop -v'])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(runner.get_hadoop_bin(),
['hadoop', '-v'])
def test_empty_hadoop_bin_means_find_hadoop_bin(self):
job = MRTwoStepJob(['-r', 'hadoop', '--hadoop-bin', ''])
job.sandbox()
with job.make_runner() as runner:
self.assertEqual(runner.get_hadoop_bin(),
[self.hadoop_bin])
|
py | 7df8f06a602db573010292bc753313e80eda0274 | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = ["airbyte-cdk~=0.1"]
TEST_REQUIREMENTS = [
"pytest~=6.1",
"pytest-mock",
"source-acceptance-test",
]
setup(
name="source_klaviyo",
description="Source implementation for Klaviyo.",
author="Airbyte",
author_email="[email protected]",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json"]},
extras_require={
"tests": TEST_REQUIREMENTS,
},
)
|
py | 7df8f0e1aa620c3ecd42e973e549594656b2e1fb | """
Flux for Home-Assistant.
The idea was taken from https://github.com/KpaBap/hue-flux/
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.flux/
"""
import datetime
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.light import (
is_on, turn_on, VALID_TRANSITION, ATTR_TRANSITION)
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import (
CONF_NAME, CONF_PLATFORM, CONF_LIGHTS, CONF_MODE)
from homeassistant.helpers.event import track_time_change
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.util import slugify
from homeassistant.util.color import (
color_temperature_to_rgb, color_RGB_to_xy_brightness,
color_temperature_kelvin_to_mired)
from homeassistant.util.dt import now as dt_now
_LOGGER = logging.getLogger(__name__)
CONF_START_TIME = 'start_time'
CONF_STOP_TIME = 'stop_time'
CONF_START_CT = 'start_colortemp'
CONF_SUNSET_CT = 'sunset_colortemp'
CONF_STOP_CT = 'stop_colortemp'
CONF_BRIGHTNESS = 'brightness'
CONF_DISABLE_BRIGHTNESS_ADJUST = 'disable_brightness_adjust'
CONF_INTERVAL = 'interval'
MODE_XY = 'xy'
MODE_MIRED = 'mired'
MODE_RGB = 'rgb'
DEFAULT_MODE = MODE_XY
DEPENDENCIES = ['light']
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'flux',
vol.Required(CONF_LIGHTS): cv.entity_ids,
vol.Optional(CONF_NAME, default="Flux"): cv.string,
vol.Optional(CONF_START_TIME): cv.time,
vol.Optional(CONF_STOP_TIME): cv.time,
vol.Optional(CONF_START_CT, default=4000):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_SUNSET_CT, default=3000):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_STOP_CT, default=1900):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_BRIGHTNESS):
vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
vol.Optional(CONF_DISABLE_BRIGHTNESS_ADJUST): cv.boolean,
vol.Optional(CONF_MODE, default=DEFAULT_MODE):
vol.Any(MODE_XY, MODE_MIRED, MODE_RGB),
vol.Optional(CONF_INTERVAL, default=30): cv.positive_int,
vol.Optional(ATTR_TRANSITION, default=30): VALID_TRANSITION
})
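# Illustrative configuration.yaml entry for this platform (a sketch only; the
# entity ids and numeric values below are assumptions, not taken from this file):
#
#   switch:
#     - platform: flux
#       lights:
#         - light.desk
#         - light.kitchen
#       start_time: '07:00'
#       stop_time: '23:00'
#       start_colortemp: 4000
#       sunset_colortemp: 3000
#       stop_colortemp: 1900
#       mode: mired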
def set_lights_xy(hass, lights, x_val, y_val, brightness, transition):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
turn_on(hass, light,
xy_color=[x_val, y_val],
brightness=brightness,
transition=transition,
white_value=brightness)
def set_lights_temp(hass, lights, mired, brightness, transition):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
turn_on(hass, light,
color_temp=int(mired),
brightness=brightness,
transition=transition)
def set_lights_rgb(hass, lights, rgb, transition):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
turn_on(hass, light,
rgb_color=rgb,
transition=transition)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Flux switches."""
name = config.get(CONF_NAME)
lights = config.get(CONF_LIGHTS)
start_time = config.get(CONF_START_TIME)
stop_time = config.get(CONF_STOP_TIME)
start_colortemp = config.get(CONF_START_CT)
sunset_colortemp = config.get(CONF_SUNSET_CT)
stop_colortemp = config.get(CONF_STOP_CT)
brightness = config.get(CONF_BRIGHTNESS)
disable_brightness_adjust = config.get(CONF_DISABLE_BRIGHTNESS_ADJUST)
mode = config.get(CONF_MODE)
interval = config.get(CONF_INTERVAL)
transition = config.get(ATTR_TRANSITION)
flux = FluxSwitch(name, hass, lights, start_time, stop_time,
start_colortemp, sunset_colortemp, stop_colortemp,
brightness, disable_brightness_adjust, mode, interval,
transition)
add_devices([flux])
def update(call=None):
"""Update lights."""
flux.flux_update()
service_name = slugify("{} {}".format(name, 'update'))
hass.services.register(DOMAIN, service_name, update)
class FluxSwitch(SwitchDevice):
"""Representation of a Flux switch."""
def __init__(self, name, hass, lights, start_time, stop_time,
start_colortemp, sunset_colortemp, stop_colortemp,
brightness, disable_brightness_adjust, mode, interval,
transition):
"""Initialize the Flux switch."""
self._name = name
self.hass = hass
self._lights = lights
self._start_time = start_time
self._stop_time = stop_time
self._start_colortemp = start_colortemp
self._sunset_colortemp = sunset_colortemp
self._stop_colortemp = stop_colortemp
self._brightness = brightness
self._disable_brightness_adjust = disable_brightness_adjust
self._mode = mode
self._interval = interval
self._transition = transition
self.unsub_tracker = None
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self.unsub_tracker is not None
def turn_on(self, **kwargs):
"""Turn on flux."""
if self.is_on:
return
# Make initial update
self.flux_update()
self.unsub_tracker = track_time_change(
self.hass, self.flux_update, second=[0, self._interval])
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn off flux."""
if self.unsub_tracker is not None:
self.unsub_tracker()
self.unsub_tracker = None
self.schedule_update_ha_state()
def flux_update(self, now=None):
"""Update all the lights using flux."""
if now is None:
now = dt_now()
sunset = get_astral_event_date(self.hass, 'sunset', now.date())
start_time = self.find_start_time(now)
stop_time = self.find_stop_time(now)
if stop_time <= start_time:
            # stop_time does not fall on the same day as start_time
if start_time < now:
# stop time is tomorrow
stop_time += datetime.timedelta(days=1)
elif now < start_time:
# stop_time was yesterday since the new start_time is not reached
stop_time -= datetime.timedelta(days=1)
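        # The code below interpolates the color temperature linearly: from
        # start_colortemp to sunset_colortemp while the sun is up, then from
        # sunset_colortemp to stop_colortemp until stop_time is reached.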
if start_time < now < sunset:
# Daytime
time_state = 'day'
temp_range = abs(self._start_colortemp - self._sunset_colortemp)
day_length = int(sunset.timestamp() - start_time.timestamp())
seconds_from_start = int(now.timestamp() - start_time.timestamp())
percentage_complete = seconds_from_start / day_length
temp_offset = temp_range * percentage_complete
if self._start_colortemp > self._sunset_colortemp:
temp = self._start_colortemp - temp_offset
else:
temp = self._start_colortemp + temp_offset
else:
# Night time
time_state = 'night'
if now < stop_time:
if stop_time < start_time and stop_time.day == sunset.day:
# we need to use yesterday's sunset time
sunset_time = sunset - datetime.timedelta(days=1)
else:
sunset_time = sunset
night_length = int(stop_time.timestamp() -
sunset_time.timestamp())
seconds_from_sunset = int(now.timestamp() -
sunset_time.timestamp())
percentage_complete = seconds_from_sunset / night_length
else:
percentage_complete = 1
temp_range = abs(self._sunset_colortemp - self._stop_colortemp)
temp_offset = temp_range * percentage_complete
if self._sunset_colortemp > self._stop_colortemp:
temp = self._sunset_colortemp - temp_offset
else:
temp = self._sunset_colortemp + temp_offset
rgb = color_temperature_to_rgb(temp)
x_val, y_val, b_val = color_RGB_to_xy_brightness(*rgb)
brightness = self._brightness if self._brightness else b_val
if self._disable_brightness_adjust:
brightness = None
if self._mode == MODE_XY:
set_lights_xy(self.hass, self._lights, x_val,
y_val, brightness, self._transition)
_LOGGER.info("Lights updated to x:%s y:%s brightness:%s, %s%% "
"of %s cycle complete at %s", x_val, y_val,
brightness, round(
percentage_complete * 100), time_state, now)
elif self._mode == MODE_RGB:
set_lights_rgb(self.hass, self._lights, rgb, self._transition)
_LOGGER.info("Lights updated to rgb:%s, %s%% "
"of %s cycle complete at %s", rgb,
round(percentage_complete * 100), time_state, now)
else:
# Convert to mired and clamp to allowed values
mired = color_temperature_kelvin_to_mired(temp)
set_lights_temp(self.hass, self._lights, mired, brightness,
self._transition)
_LOGGER.info("Lights updated to mired:%s brightness:%s, %s%% "
"of %s cycle complete at %s", mired, brightness,
round(percentage_complete * 100), time_state, now)
def find_start_time(self, now):
"""Return sunrise or start_time if given."""
if self._start_time:
sunrise = now.replace(
hour=self._start_time.hour, minute=self._start_time.minute,
second=0)
else:
sunrise = get_astral_event_date(self.hass, 'sunrise', now.date())
return sunrise
def find_stop_time(self, now):
"""Return dusk or stop_time if given."""
if self._stop_time:
dusk = now.replace(
hour=self._stop_time.hour, minute=self._stop_time.minute,
second=0)
else:
dusk = get_astral_event_date(self.hass, 'dusk', now.date())
return dusk
|
py | 7df8f283e7c7085a6e1c12d81443ec3d2a3f7930 | import sys
import os
import time
import argparse
import glob
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch import Tensor
from PIL import Image
import cv2
import numpy as np
import craft_utils
import imgproc
import file_utils
import json
import zipfile
import test
from tqdm import tqdm
from craft import CRAFT
from dataset import CRAFTDataset
from torch.utils.data import DataLoader
from collections import OrderedDict
def train(args):
# load net
net = CRAFT() # initialize
if not os.path.exists(args.trained_model):
args.trained_model = None
if args.trained_model is not None:
print('Loading weights from checkpoint (' + args.trained_model + ')')
if args.cuda:
net.load_state_dict(test.copyStateDict(torch.load(args.trained_model)))
else:
net.load_state_dict(test.copyStateDict(torch.load(args.trained_model, map_location='cpu')))
if args.cuda:
net = net.cuda()
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
# # LinkRefiner
# refine_net = None
# if args.refine:
# from refinenet import RefineNet
#
# refine_net = RefineNet()
# print('Loading weights of refiner from checkpoint (' + args.refiner_model + ')')
# if args.cuda:
# refine_net.load_state_dict(test.copyStateDict(torch.load(args.refiner_model)))
# refine_net = refine_net.cuda()
# refine_net = torch.nn.DataParallel(refine_net)
# else:
# refine_net.load_state_dict(test.copyStateDict(torch.load(args.refiner_model, map_location='cpu')))
#
# args.poly = True
criterion = craft_utils.CRAFTLoss()
optimizer = optim.Adam(net.parameters(), args.learning_rate)
train_data = CRAFTDataset(args)
dataloader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True)
t0 = time.time()
for epoch in range(args.max_epoch):
pbar = tqdm(enumerate(dataloader), total=len(dataloader), desc=f'Epoch {epoch}')
running_loss = 0.0
for i, data in pbar:
x, y_region, y_link, y_conf = data
x = x.cuda()
y_region = y_region.cuda()
y_link = y_link.cuda()
y_conf = y_conf.cuda()
optimizer.zero_grad()
y, feature = net(x)
score_text = y[:, :, :, 0]
score_link = y[:, :, :, 1]
L = criterion(score_text, score_link, y_region, y_link, y_conf)
L.backward()
optimizer.step()
running_loss += L.data.item()
if i % 2000 == 1999 or i == len(dataloader) - 1:
pbar.set_postfix_str('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / min(i + 1, 2000)))
running_loss = 0.0
# Save trained model
torch.save(net.state_dict(), args.weight)
print(f'training finished\n {time.time() - t0} spent for {args.max_epoch} epochs')
|
py | 7df8f38587b2b528c932373228e755f76c419855 | # Generated by Django 3.1.3 on 2020-12-05 12:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MyModelName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('my_field_name', models.CharField(help_text='Enter Field Documentation', max_length=20)),
],
options={
'ordering': ['-my_field_name'],
},
),
]
|
py | 7df8f3d11f6f7fa4feab14698f6c45a757af104e | # This is a Bokeh server app. To function, it must be run using the
# Bokeh server at the command line:
#
# bokeh serve --show patch_app.py
#
# Running "python patch_app.py" will NOT work.
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
# CDS with "typical" scalar elements
x = np.random.uniform(10, size=500)
y = np.random.uniform(10, size=500)
color = ["navy"]*500
color[:200] = ["firebrick"]*200
source = ColumnDataSource(data=dict(x=x, y=y, color=color))
p = figure(plot_width=400, plot_height=400)
p.circle('x', 'y', alpha=0.6, size=8, color="color", source=source)
# CDS with 1d array elements
x = np.linspace(0, 10, 200)
y0 = np.sin(x)
y1 = np.cos(x)
source1d = ColumnDataSource(data=dict(xs=[x, x], ys=[y0, y1], color=["olive", "navy"]))
p1d = figure(plot_width=400, plot_height=400)
p1d.multi_line('xs', 'ys', alpha=0.6, line_width=4, color="color", source=source1d)
# CDS with 2d image elements
N = 200
img = np.empty((N,N), dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((N, N, 4))
for i in range(N):
for j in range(N):
view[i, j, :] = [int(j/N*255), int(i/N*255), 158, 255]
source2d = ColumnDataSource(data=dict(img=[img]))
p2d = figure(plot_width=400, plot_height=400, x_range=(0,10), y_range=(0,10))
p2d.image_rgba(image='img', x=0, y=0, dw=10, dh=10, source=source2d)
def update():
# update some items in the "typical" CDS column
s = slice(100)
new_x = source.data['x'][s] + np.random.uniform(-0.1, 0.1, size=100)
new_y = source.data['y'][s] + np.random.uniform(-0.2, 0.2, size=100)
source.patch({ 'x' : [(s, new_x)], 'y' : [(s, new_y)] })
# update a single point of the 1d multi-line data
i = np.random.randint(200)
new_y = source1d.data['ys'][0][i] + (0.2 * np.random.random()-0.1)
source1d.patch({ 'ys' : [([0, i], [new_y])]})
# update five rows of the 2d image data at a time
s1, s2 = slice(50, 151, 20), slice(None)
index = [0, s1, s2]
new_data = np.roll(source2d.data['img'][0][s1, s2], 2, axis=1).flatten()
source2d.patch({ 'img' : [(index, new_data)] })
curdoc().add_periodic_callback(update, 50)
curdoc().add_root(gridplot([[p, p1d, p2d]]))
|
py | 7df8f3e6ec4c299891a2a0d2466aa649ebe88fe4 | from .responses import CognitoIdpResponse, CognitoIdpJsonWebKeyResponse
url_bases = [r"https?://cognito-idp\.(.+)\.amazonaws.com"]
url_paths = {
"{0}/$": CognitoIdpResponse.dispatch,
"{0}/(?P<user_pool_id>[^/]+)/.well-known/jwks.json$": CognitoIdpJsonWebKeyResponse().serve_json_web_key,
}
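# Illustrative routing sketch: a host such as https://cognito-idp.us-east-1.amazonaws.com
# matches url_bases; its bare "/" path is handled by CognitoIdpResponse.dispatch, while
# /<user_pool_id>/.well-known/jwks.json is served by CognitoIdpJsonWebKeyResponse.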
|
py | 7df8f40c2e4c3211ab37807107caffde18fdf719 | ##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class ClassLoaderPathTest( GafferTest.TestCase ) :
def test( self ) :
p = Gaffer.ClassLoaderPath( IECore.ClassLoader.defaultOpLoader(), "/" )
self.failUnless( p.isValid() )
self.failIf( p.isLeaf() )
p.append( "files" )
self.failUnless( p.isValid() )
self.failIf( p.isLeaf() )
p.append( "iDontExist" )
self.failIf( p.isValid() )
self.failIf( p.isLeaf() )
del p[-1]
self.failUnless( p.isValid() )
self.failIf( p.isLeaf() )
p.setFromString( "/files/sequenceRenumber" )
self.failUnless( p.isValid() )
self.failUnless( p.isLeaf() )
p.setFromString( "/files" )
children = p.children()
for child in children :
self.failUnless( isinstance( child, Gaffer.ClassLoaderPath ) )
self.assertEqual( len( child ), len( p ) + 1 )
self.failUnless( child.isLeaf() )
children = [ str( x ) for x in children ]
self.failUnless( "/files/sequenceCopy" in children )
self.failUnless( "/files/sequenceLs" in children )
self.failUnless( "/files/sequenceMove" in children )
p.setFromString( "/" )
children = p.children()
for child in children :
self.failUnless( isinstance( child, Gaffer.ClassLoaderPath ) )
self.assertEqual( len( child ), len( p ) + 1 )
p.setFromString( "/mesh/normals" )
versions = p.info()["classLoader:versions"]
self.failUnless( isinstance( versions, list ) )
self.failUnless( len( versions ) )
def testRelative( self ) :
p = Gaffer.ClassLoaderPath( IECore.ClassLoader.defaultOpLoader(), "files" )
self.assertEqual( str( p ), "files" )
self.assertEqual( p.root(), "" )
self.assertTrue( "files/sequenceRenumber" in [ str( c ) for c in p.children() ] )
p2 = p.copy()
self.assertEqual( str( p2 ), "files" )
self.assertEqual( p2.root(), "" )
self.assertTrue( "files/sequenceRenumber" in [ str( c ) for c in p2.children() ] )
def testLoad( self ) :
p = Gaffer.ClassLoaderPath( IECore.ClassLoader.defaultOpLoader(), "/mesh/normals" )
op = p.load()()
self.failUnless( isinstance( op, IECore.Op ) )
if __name__ == "__main__":
unittest.main()
|
py | 7df8f426cbc4805bcdfb6c2a9ea142524287ee6d | # coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import uptrends
from uptrends.api.monitor_check_api import MonitorCheckApi # noqa: E501
from uptrends.rest import ApiException
class TestMonitorCheckApi(unittest.TestCase):
"""MonitorCheckApi unit test stubs"""
def setUp(self):
self.api = uptrends.api.monitor_check_api.MonitorCheckApi() # noqa: E501
def tearDown(self):
pass
def test_monitor_check_get_account_monitor_checks(self):
"""Test case for monitor_check_get_account_monitor_checks
Returns all monitor check data. # noqa: E501
"""
pass
def test_monitor_check_get_http_details(self):
"""Test case for monitor_check_get_http_details
Returns HTTP details for a monitor check. # noqa: E501
"""
pass
def test_monitor_check_get_monitor_check(self):
"""Test case for monitor_check_get_monitor_check
Returns monitor check data for a specific monitor. # noqa: E501
"""
pass
def test_monitor_check_get_monitor_group_data(self):
"""Test case for monitor_check_get_monitor_group_data
Returns monitor check data for a specific monitor group. # noqa: E501
"""
pass
def test_monitor_check_get_multistep_details(self):
"""Test case for monitor_check_get_multistep_details
Returns Multi-Step API details for a monitor check. # noqa: E501
"""
pass
def test_monitor_check_get_screenshots(self):
"""Test case for monitor_check_get_screenshots
Gets a specific screenshot for a specified monitor check # noqa: E501
"""
pass
def test_monitor_check_get_single_monitor_check(self):
"""Test case for monitor_check_get_single_monitor_check
Returns a single monitor check. # noqa: E501
"""
pass
def test_monitor_check_get_transaction_details(self):
"""Test case for monitor_check_get_transaction_details
Returns transaction step details for a monitor check. # noqa: E501
"""
pass
def test_monitor_check_get_waterfall_info(self):
"""Test case for monitor_check_get_waterfall_info
Returns waterfall information for a monitor check. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
py | 7df8f483ef0f5152df4faab62109cdf077cfb1a9 | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython import MetaData
from qpython.qtype import * # @UnusedWildImport
from numpy import longlong
_MILLIS_PER_DAY = 24 * 60 * 60 * 1000
_MILLIS_PER_DAY_FLOAT = float(_MILLIS_PER_DAY)
_QEPOCH_MS = long(10957 * _MILLIS_PER_DAY)
_EPOCH_QTIMESTAMP_NS = _QEPOCH_MS * 1000000
_EPOCH_QMONTH = numpy.datetime64('2000-01', 'M')
_EPOCH_QDATE = numpy.datetime64('2000-01-01', 'D')
_EPOCH_QDATETIME = numpy.datetime64(_QEPOCH_MS, 'ms')
_EPOCH_TIMESTAMP = numpy.datetime64(_EPOCH_QTIMESTAMP_NS, 'ns')
_QMONTH_NULL = qnull(QMONTH)
_QDATE_NULL = qnull(QDATE)
_QDATETIME_NULL = qnull(QDATETIME)
_QMINUTE_NULL = qnull(QMINUTE)
_QSECOND_NULL = qnull(QSECOND)
_QTIME_NULL = qnull(QTIME)
_QTIMESTAMP_NULL = qnull(QTIMESTAMP)
_QTIMESPAN_NULL = qnull(QTIMESPAN)
class QTemporal(object):
'''
Represents a q temporal value.
The :class:`.QTemporal` wraps `numpy.datetime64` or `numpy.timedelta64`
along with meta-information like qtype indicator.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
'''
def __init__(self, dt):
self._datetime = dt
def _meta_init(self, **meta):
self.meta = MetaData(**meta)
@property
def raw(self):
'''Return wrapped datetime object.
:returns: `numpy.datetime64` or `numpy.timedelta64` - wrapped datetime
'''
return self._datetime
def __str__(self):
return '%s [%s]' % (self._datetime, self.meta)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.meta.qtype == other.meta.qtype
and self._datetime == other._datetime)
def __ne__(self, other):
return not self.__eq__(other)
def qtemporal(dt, **meta):
'''Converts a `numpy.datetime64` or `numpy.timedelta64` to
:class:`.QTemporal` and enriches object instance with given meta data.
Examples:
>>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
2001-01-01 [metadata(qtype=-14)]
>>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
43499123 milliseconds [metadata(qtype=-19)]
>>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
nan [metadata(qtype=-15)]
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
:Kwargs:
- `qtype` (`integer`) - qtype indicator
:returns: `QTemporal` - wrapped datetime
'''
result = QTemporal(dt)
result._meta_init(**meta)
return result
def from_raw_qtemporal(raw, qtype):
'''
Converts raw numeric value to `numpy.datetime64` or `numpy.timedelta64`
instance.
Actual conversion applied to raw numeric value depends on `qtype` parameter.
:Parameters:
- `raw` (`integer`, `float`) - raw representation to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.datetime64` or `numpy.timedelta64` - converted datetime
'''
return _FROM_Q[qtype](raw)
def to_raw_qtemporal(dt, qtype):
'''
Converts datetime/timedelta instance to raw numeric value.
Actual conversion applied to datetime/timedelta instance depends on `qtype`
parameter.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime/timedelta
object to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `integer`, `float` - raw numeric value
'''
return _TO_Q[qtype](dt)
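# Scalar conversion sketch (illustrative; it mirrors the array examples below).
# Raw q dates count days from the q epoch 2000-01-01, so 366 corresponds to
# 2001-01-01 because 2000 is a leap year:
#
#   from_raw_qtemporal(366, QDATE)                                # numpy.datetime64('2001-01-01')
#   to_raw_qtemporal(numpy.datetime64('2001-01-01', 'D'), QDATE)  # 366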
def array_from_raw_qtemporal(raw, qtype):
'''
Converts `numpy.array` containing raw q representation to ``datetime64``/``timedelta64``
array.
Examples:
>>> raw = numpy.array([366, 121, qnull(QDATE)])
>>> print(array_from_raw_qtemporal(raw, qtype = QDATE))
['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `raw` (`numpy.array`) - numpy raw array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``
:raises: `ValueError`
'''
if not isinstance(raw, numpy.ndarray):
raise ValueError('raw parameter is expected to be of type: numpy.ndarray. Was: %s' % type(raw))
qtype = -abs(qtype)
conversion = _FROM_RAW_LIST[qtype]
mask = raw == qnull(qtype)
dtype = PY_TYPE[qtype]
array = raw.astype(dtype) if dtype != raw.dtype else raw
array = conversion(array) if conversion else array
null = _NUMPY_NULL[qtype]
array = numpy.where(mask, null, array)
return array
def array_to_raw_qtemporal(array, qtype):
'''
Converts `numpy.array` containing ``datetime64``/``timedelta64`` to raw
q representation.
Examples:
>>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
>>> print(array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST))
[-365 -364 -363 ..., 2188 2189 2190]
>>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
Traceback (most recent call last):
...
ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32
:Parameters:
- `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with raw values
:raises: `ValueError`
'''
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray. Was: %s' % type(array))
if not array.dtype.type in (numpy.datetime64, numpy.timedelta64):
raise ValueError('array.dtype is expected to be of type: datetime64 or timedelta64. Was: %s' % array.dtype)
qtype = -abs(qtype)
conversion = _TO_RAW_LIST[qtype]
raw = array.view(numpy.int64).view(numpy.ndarray)
mask = raw == numpy.int64(-2 ** 63)
raw = conversion(raw) if conversion else raw
null = qnull(qtype)
raw = numpy.where(mask, null, raw)
return raw
def _from_qmonth(raw):
if raw == _QMONTH_NULL:
return _NUMPY_NULL[QMONTH]
else:
return _EPOCH_QMONTH + numpy.timedelta64(int(raw), 'M')
def _to_qmonth(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QMONTH).astype(int) if not numpy.isnan(dt) else _QMONTH_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdate(raw):
if raw == _QDATE_NULL:
return _NUMPY_NULL[QDATE]
else:
return _EPOCH_QDATE + numpy.timedelta64(int(raw), 'D')
def _to_qdate(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATE).astype(int) if not numpy.isnan(dt) else _QDATE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdatetime(raw):
if numpy.isnan(raw) or raw == _QDATETIME_NULL:
return _NUMPY_NULL[QDATETIME]
else:
return _EPOCH_QDATETIME + numpy.timedelta64(long(_MILLIS_PER_DAY * raw), 'ms')
def _to_qdatetime(dt):
t_dt = type(dt)
if t_dt == numpy.float64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATETIME).astype(float) / _MILLIS_PER_DAY if not numpy.isnan(dt) else _QDATETIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qminute(raw):
if raw == _QMINUTE_NULL:
return _NUMPY_NULL[QMINUTE]
else:
return numpy.timedelta64(int(raw), 'm')
def _to_qminute(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not numpy.isnan(dt) else _QMINUTE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qsecond(raw):
if raw == _QSECOND_NULL:
return _NUMPY_NULL[QSECOND]
else:
return numpy.timedelta64(int(raw), 's')
def _to_qsecond(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not numpy.isnan(dt) else _QSECOND_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtime(raw):
if raw == _QTIME_NULL:
return _NUMPY_NULL[QTIME]
else:
return numpy.timedelta64(int(raw), 'ms')
def _to_qtime(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not numpy.isnan(dt) else _QTIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimestamp(raw):
if raw == _QTIMESTAMP_NULL:
return _NUMPY_NULL[QTIMESTAMP]
else:
return _EPOCH_TIMESTAMP + numpy.timedelta64(long(raw), 'ns')
def _to_qtimestamp(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_TIMESTAMP).astype(longlong) if not numpy.isnan(dt) else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimespan(raw):
if raw == _QTIMESPAN_NULL:
return _NUMPY_NULL[QTIMESPAN]
else:
return numpy.timedelta64(long(raw), 'ns')
def _to_qtimespan(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(longlong) if not numpy.isnan(dt) else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
_FROM_Q = {
QMONTH: _from_qmonth,
QDATE: _from_qdate,
QDATETIME: _from_qdatetime,
QMINUTE: _from_qminute,
QSECOND: _from_qsecond,
QTIME: _from_qtime,
QTIMESTAMP: _from_qtimestamp,
QTIMESPAN: _from_qtimespan,
}
_TO_Q = {
QMONTH: _to_qmonth,
QDATE: _to_qdate,
QDATETIME: _to_qdatetime,
QMINUTE: _to_qminute,
QSECOND: _to_qsecond,
QTIME: _to_qtime,
QTIMESTAMP: _to_qtimestamp,
QTIMESPAN: _to_qtimespan,
}
_TO_RAW_LIST = {
QMONTH: lambda a: (a - 360).astype(numpy.int32),
QDATE: lambda a: (a - 10957).astype(numpy.int32),
QDATETIME: lambda a: ((a - _QEPOCH_MS) / _MILLIS_PER_DAY_FLOAT).astype(numpy.float64),
QMINUTE: lambda a: a.astype(numpy.int32),
QSECOND: lambda a: a.astype(numpy.int32),
QTIME: lambda a: a.astype(numpy.int32),
QTIMESTAMP: lambda a: a - _EPOCH_QTIMESTAMP_NS,
QTIMESPAN: None,
}
_FROM_RAW_LIST = {
QMONTH: lambda a: numpy.array((a + 360), dtype = 'datetime64[M]'),
QDATE: lambda a: numpy.array((a + 10957), dtype = 'datetime64[D]'),
QDATETIME: lambda a: numpy.array((a * _MILLIS_PER_DAY + _QEPOCH_MS), dtype = 'datetime64[ms]'),
QMINUTE: lambda a: numpy.array(a, dtype = 'timedelta64[m]'),
QSECOND: lambda a: numpy.array(a, dtype = 'timedelta64[s]'),
QTIME: lambda a: numpy.array(a, dtype = 'timedelta64[ms]'),
QTIMESTAMP: lambda a: numpy.array((a + _EPOCH_QTIMESTAMP_NS), dtype = 'datetime64[ns]'),
QTIMESPAN: lambda a: numpy.array(a, dtype = 'timedelta64[ns]'),
}
_NUMPY_NULL = {
QMONTH: numpy.datetime64('NaT', 'M'),
QDATE: numpy.datetime64('NaT', 'D'),
QDATETIME: numpy.datetime64('NaT', 'ms'),
QMINUTE: numpy.timedelta64('NaT', 'm'),
QSECOND: numpy.timedelta64('NaT', 's'),
QTIME: numpy.timedelta64('NaT', 'ms'),
QTIMESTAMP: numpy.datetime64('NaT', 'ns'),
QTIMESPAN: numpy.timedelta64('NaT', 'ns'),
}
|
py | 7df8f50d7ad2269ecde904548d83cf2e00adfaa3 | import os
setupargs = {}
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='period',
version='0.1',
description='Python date/time/duration/recurring string parser.',
long_description='',
author='Christopher Grebs',
author_email='[email protected]',
url='https://github.com/EnTeQuAk/period',
packages=['period'],
package_data={'': ['LICENSE']},
include_package_data=True,
install_requires=['pytz'],
license='ISC',
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: ISC License (ISCL)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
),
)
|
py | 7df8f61c9526c3d5e386014bff75f138b3c4062b | """Histograms of ranked posterior draws, plotted for each chain."""
from itertools import cycle
import matplotlib.pyplot as plt
from ..data import convert_to_dataset
from ..labels import BaseLabeller
from ..sel_utils import xarray_var_iter
from ..rcparams import rcParams
from ..stats.density_utils import _sturges_formula
from ..utils import _var_names
from .plot_utils import default_grid, filter_plotters_list, get_plotting_function
def plot_rank(
data,
var_names=None,
filter_vars=None,
transform=None,
coords=None,
bins=None,
kind="bars",
colors="cycle",
ref_line=True,
labels=True,
labeller=None,
grid=None,
figsize=None,
ax=None,
backend=None,
ref_line_kwargs=None,
bar_kwargs=None,
vlines_kwargs=None,
marker_vlines_kwargs=None,
backend_kwargs=None,
show=None,
):
"""Plot rank order statistics of chains.
From the paper: Rank plots are histograms of the ranked posterior draws (ranked over all
chains) plotted separately for each chain.
If all of the chains are targeting the same posterior, we expect the ranks in each chain to be
uniform, whereas if one chain has a different location or scale parameter, this will be
reflected in the deviation from uniformity. If rank plots of all chains look similar, this
indicates good mixing of the chains.
This plot was introduced by Aki Vehtari, Andrew Gelman, Daniel Simpson, Bob Carpenter,
Paul-Christian Burkner (2019): Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC. arXiv preprint https://arxiv.org/abs/1903.08008
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object.
Refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: string or list of variable names
Variables to be plotted. Prefix the variables by ``~`` when you want to exclude
them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
transform: callable
Function to transform data (defaults to None i.e.the identity function)
coords: mapping, optional
Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`
bins: None or passed to np.histogram
Binning strategy used for histogram. By default uses twice the result of Sturges' formula.
See :func:`numpy.histogram` documentation for, other available arguments.
kind: string
If bars (defaults), ranks are represented as stacked histograms (one per chain). If vlines
ranks are represented as vertical lines above or below ``ref_line``.
colors: string or list of strings
List with valid matplotlib colors, one color per model. Alternative a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
models. Defaults to `cycle`.
ref_line: boolean
Whether to include a dashed line showing where a uniform distribution would lie
labels: bool
whether to plot or not the x and y labels, defaults to True
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, ArviZ will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
ref_line_kwargs : dict, optional
Reference line keyword arguments, passed to :meth:`mpl:matplotlib.axes.Axes.axhline` or
:class:`bokeh:bokeh.models.Span`.
bar_kwargs : dict, optional
Bars keyword arguments, passed to :meth:`mpl:matplotlib.axes.Axes.bar` or
:meth:`bokeh:bokeh.plotting.Figure.vbar`.
vlines_kwargs : dict, optional
Vlines keyword arguments, passed to :meth:`mpl:matplotlib.axes.Axes.vlines` or
:meth:`bokeh:bokeh.plotting.Figure.multi_line`.
marker_vlines_kwargs : dict, optional
Marker for the vlines keyword arguments, passed to :meth:`mpl:matplotlib.axes.Axes.plot` or
:meth:`bokeh:bokeh.plotting.Figure.circle`.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`. For additional documentation
check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_trace : Plot distribution (histogram or kernel density estimates) and
sampled values or rank plot.
Examples
--------
Show a default rank plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_rank(data)
Recreate Figure 13 from the arxiv preprint
.. plot::
:context: close-figs
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_rank(data, var_names='tau')
Use vlines to compare results for centered vs noncentered models
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> centered_data = az.load_arviz_data('centered_eight')
>>> noncentered_data = az.load_arviz_data('non_centered_eight')
>>> _, ax = plt.subplots(1, 2, figsize=(12, 3))
>>> az.plot_rank(centered_data, var_names="mu", kind='vlines', ax=ax[0])
>>> az.plot_rank(noncentered_data, var_names="mu", kind='vlines', ax=ax[1])
Change the aesthetics using kwargs
.. plot::
:context: close-figs
>>> az.plot_rank(noncentered_data, var_names="mu", kind="vlines",
>>> vlines_kwargs={'lw':0}, marker_vlines_kwargs={'lw':3});
"""
if transform is not None:
data = transform(data)
posterior_data = convert_to_dataset(data, group="posterior")
if coords is not None:
posterior_data = posterior_data.sel(**coords)
var_names = _var_names(var_names, posterior_data, filter_vars)
plotters = filter_plotters_list(
list(xarray_var_iter(posterior_data, var_names=var_names, combined=True)), "plot_rank"
)
length_plotters = len(plotters)
if bins is None:
bins = _sturges_formula(posterior_data, mult=2)
if labeller is None:
labeller = BaseLabeller()
rows, cols = default_grid(length_plotters, grid=grid)
chains = len(posterior_data.chain)
if colors == "cycle":
colors = [
prop
for _, prop in zip(
range(chains), cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
)
]
elif isinstance(colors, str):
colors = [colors] * chains
rankplot_kwargs = dict(
axes=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
plotters=plotters,
bins=bins,
kind=kind,
colors=colors,
ref_line=ref_line,
labels=labels,
labeller=labeller,
ref_line_kwargs=ref_line_kwargs,
bar_kwargs=bar_kwargs,
vlines_kwargs=vlines_kwargs,
marker_vlines_kwargs=marker_vlines_kwargs,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_rank", "rankplot", backend)
axes = plot(**rankplot_kwargs)
return axes
|
py | 7df8f7f8be3f7d5401cffd84013f4487ef666f05 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .Input.LandUse.TotAreaMeters import TotAreaMeters
from .MultiUse_Fxns.Discharge.StreamFlow_1 import StreamFlow_1_f
from .MultiUse_Fxns.LossFactAdj import LossFactAdj_f
from .MultiUse_Fxns.Runoff.UrbRunoffLiter import UrbRunoffLiter_f
"""
Imported from AFOS.bas
"""
import logging
log = logging.getLogger(__name__)
def AnimalOperations(z, Y):
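    # Monthly loop: estimate phosphorus (P) and fecal coliform (FC) losses from
    # non-grazing (NG) and grazing (GR) animal manure, barnyards and grazing land,
    # then combine them with wastewater, septic, urban and wildlife sources into
    # total pathogen loads and an in-stream organism concentration.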
for i in range(12):
z.NGLostManP[Y][i] = (z.NGAppManP[i] * z.NGAppPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
* (1 - z.NGPctSoilIncRate[i]))
if z.NGLostManP[Y][i] > z.NGAppManP[i]:
z.NGLostManP[Y][i] = z.NGAppManP[i]
if z.NGLostManP[Y][i] < 0:
z.NGLostManP[Y][i] = 0
z.NGLostManFC[Y][i] = (z.NGAppManFC[i] * z.NGAppFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
* (1 - z.NGPctSoilIncRate[i]))
if z.NGLostManFC[Y][i] > z.NGAppManFC[i]:
z.NGLostManFC[Y][i] = z.NGAppManFC[i]
if z.NGLostManFC[Y][i] < 0:
z.NGLostManFC[Y][i] = 0
z.NGLostBarnP[Y][i] = (z.NGInitBarnP[i] * z.NGBarnPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
- z.NGInitBarnP[i] * z.NGBarnPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.AWMSNgPct * z.NgAWMSCoeffP
+ z.NGInitBarnP[i] * z.NGBarnPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.RunContPct * z.RunConCoeffP)
if z.NGLostBarnP[Y][i] > z.NGInitBarnP[i]:
z.NGLostBarnP[Y][i] = z.NGInitBarnP[i]
if z.NGLostBarnP[Y][i] < 0:
z.NGLostBarnP[Y][i] = 0
z.NGLostBarnFC[Y][i] = (z.NGInitBarnFC[i] * z.NGBarnFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
- z.NGInitBarnFC[i] * z.NGBarnFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.AWMSNgPct * z.NgAWMSCoeffP
+ z.NGInitBarnFC[i] * z.NGBarnFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.RunContPct * z.RunConCoeffP)
if z.NGLostBarnFC[Y][i] > z.NGInitBarnFC[i]:
z.NGLostBarnFC[Y][i] = z.NGInitBarnFC[i]
if z.NGLostBarnFC[Y][i] < 0:
z.NGLostBarnFC[Y][i] = 0
# Grazing animal losses
z.GRLostManP[Y][i] = (z.GRAppManP[i] * z.GRAppPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
* (1 - z.GRPctSoilIncRate[i]))
if z.GRLostManP[Y][i] > z.GRAppManP[i]:
z.GRLostManP[Y][i] = z.GRAppManP[i]
if z.GRLostManP[Y][i] < 0:
z.GRLostManP[Y][i] = 0
z.GRLostManFC[Y][i] = (z.GRAppManFC[i] * z.GRAppFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
* (1 - z.GRPctSoilIncRate[i]))
if z.GRLostManFC[Y][i] > z.GRAppManFC[i]:
z.GRLostManFC[Y][i] = z.GRAppManFC[i]
if z.GRLostManFC[Y][i] < 0:
z.GRLostManFC[Y][i] = 0
z.GRLostBarnP[Y][i] = (z.GRInitBarnP[i] * z.GRBarnPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
- z.GRInitBarnP[i] * z.GRBarnPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.AWMSGrPct * z.GrAWMSCoeffP
+ z.GRInitBarnP[i] * z.GRBarnPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.RunContPct * z.RunConCoeffP)
if z.GRLostBarnP[Y][i] > z.GRInitBarnP[i]:
z.GRLostBarnP[Y][i] = z.GRInitBarnP[i]
if z.GRLostBarnP[Y][i] < 0:
z.GRLostBarnP[Y][i] = 0
z.GRLostBarnFC[Y][i] = (z.GRInitBarnFC[i] * z.GRBarnFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i]
- z.GRInitBarnFC[i] * z.GRBarnFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.AWMSGrPct * z.GrAWMSCoeffP
+ z.GRInitBarnFC[i] * z.GRBarnFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][
i] * z.RunContPct * z.RunConCoeffP)
if z.GRLostBarnFC[Y][i] > z.GRInitBarnFC[i]:
z.GRLostBarnFC[Y][i] = z.GRInitBarnFC[i]
if z.GRLostBarnFC[Y][i] < 0:
z.GRLostBarnFC[Y][i] = 0
z.GRLossP[Y][i] = ((z.GrazingP[i] - z.GRStreamP[i])
* z.GrazingPRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i])
if z.GRLossP[Y][i] > (z.GrazingP[i] - z.GRStreamP[i]):
z.GRLossP[Y][i] = (z.GrazingP[i] - z.GRStreamP[i])
if z.GRLossP[Y][i] < 0:
z.GRLossP[Y][i] = 0
z.GRLossFC[Y][i] = ((z.GrazingFC[i] - z.GRStreamFC[i])
* z.GrazingFCRate[i] * LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i])
if z.GRLossFC[Y][i] > (z.GrazingFC[i] - z.GRStreamFC[i]):
z.GRLossFC[Y][i] = (z.GrazingFC[i] - z.GRStreamFC[i])
if z.GRLossFC[Y][i] < 0:
z.GRLossFC[Y][i] = 0
# Total animal related losses
z.AnimalP[Y][i] = ((z.NGLostManP[Y][i]
+ z.GRLostManP[Y][i]
+ z.NGLostBarnP[Y][i]
+ z.GRLostBarnP[Y][i]
+ z.GRLossP[Y][i]
+ z.GRStreamP[i])
- ((z.NGLostManP[Y][i] + z.NGLostBarnP[Y][i]) * z.PhytasePct * z.PhytaseCoeff))
z.AnimalFC[Y][i] = (z.NGLostManFC[Y][i]
+ z.GRLostManFC[Y][i]
+ z.NGLostBarnFC[Y][i]
+ z.GRLostBarnFC[Y][i]
+ z.GRLossFC[Y][i]
+ z.GRStreamFC[i])
        # CALCULATE PATHOGEN LOADS
z.ForestAreaTotalSqMi = 0
z.ForestAreaTotalSqMi = (z.ForestAreaTotal * 0.01) / 2.59
z.PtFlowLiters = (z.PointFlow[i] / 100) * TotAreaMeters(z.NRur, z.NUrb, z.Area) * 1000
# Get the wildlife orgs
z.WWOrgs[Y][i] = z.PtFlowLiters * (z.WWTPConc * 10) * (1 - z.InstreamDieoff)
z.SSOrgs[Y][i] = (z.SepticOrgsDay
* z.SepticsDay[i]
* z.DaysMonth[Y][i]
* z.SepticFailure
* (1 - z.InstreamDieoff))
if LossFactAdj_f(z.Prec, z.DaysMonth)[Y][i] * (1 - z.WuDieoff) > 1:
z.UrbOrgs[Y][i] = (z.UrbRunoffLiter[Y][i]
* (z.UrbEMC * 10)
* (1 - z.InstreamDieoff))
z.WildOrgs[Y][i] = (z.WildOrgsDay
* z.DaysMonth[Y][i]
* z.WildDensity
* z.ForestAreaTotalSqMi
* (1 - z.InstreamDieoff))
else:
z.UrbOrgs[Y][i] = (
UrbRunoffLiter_f(z.NYrs, z.DaysMonth, z.InitSnow_0, z.Temp, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA)[Y][i]
* (z.UrbEMC * 10)
* (1 - z.WuDieoff)
* (1 - z.InstreamDieoff))
z.WildOrgs[Y][i] = (z.WildOrgsDay
* z.DaysMonth[Y][i]
* z.WildDensity
* z.ForestAreaTotalSqMi
* (1 - z.WuDieoff)
* (1 - z.InstreamDieoff))
# Get the total orgs
z.TotalOrgs[Y][i] = (z.WWOrgs[Y][i]
+ z.SSOrgs[Y][i]
+ z.UrbOrgs[Y][i]
+ z.WildOrgs[Y][i]
+ z.AnimalFC[Y][i])
z.CMStream[Y][i] = (StreamFlow_1_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area,
z.CNI_0, z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper,
z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV, z.PcntET, z.DayHrs, z.MaxWaterCap,
z.SatStor_0, z.RecessionCoef, z.SeepCoef)[Y][i] / 100) * TotAreaMeters(
z.NRur, z.NUrb, z.Area)
if z.CMStream[Y][i] > 0:
z.OrgConc[Y][i] = (z.TotalOrgs[Y][i] / (z.CMStream[Y][i] * 1000)) / 10
else:
z.OrgConc[Y][i] = 0
|
py | 7df8f8c786268e4a268cdd30b3d25670a6f7d120 | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <[email protected]>
# Enrico Faulhaber <[email protected]>
# Christian Felder <[email protected]>
#
# *****************************************************************************
"""NICOS Sample device."""
from nicos import session
from nicos.core import InvalidValueError, Moveable, Override, Param, anytype, \
dictof, none_or, oneof, status
from nicos.utils import safeName
class Sample(Moveable):
"""A special device to represent a sample.
An instance of this class is used as the *sample* attached device of the
`Experiment` object. It can be subclassed to add special sample
properties, such as lattice and orientation calculations, or more
parameters describing the sample.
The device stores the collection of all currently defined samples in
its `samples` parameter. When changing samples, it will overwrite the
device's other parameters with these values.
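    A minimal usage sketch (illustrative only, for an attached instance called
    ``sample``): ``sample.new({'name': 'reference'})`` defines and selects a new
    sample, ``sample.set(1, {'name': 'vanadium'})`` stores parameters under
    number 1, and ``sample.select('vanadium')`` switches to it by name or number.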
"""
parameters = {
'samplename': Param('Current sample name', type=str, settable=True,
category='sample'),
'samplenumber': Param('Current sample number: e.g. the position in '
'a sample changer or the index of the sample '
'among all defined samples', type=none_or(int),
settable=True),
'samples': Param('Information about all defined samples',
type=dictof(int, dictof(str, anytype)),
settable=True, internal=True, preinit=True),
}
parameter_overrides = {
'unit': Override(mandatory=False, default=''),
}
valuetype = str
def doRead(self, maxage=0):
return self.samplename
def doStatus(self, maxage=0):
return status.OK, ''
def doStart(self, target):
self.select(target)
def doIsAtTarget(self, pos, target):
# never warn about self.target mismatch
return True
@property
def filename(self):
return safeName(self.samplename)
def doWriteSamplename(self, name):
if name:
session.elogEvent('sample', name)
def clear(self):
"""Clear experiment-specific information."""
self.samplename = ''
self.samplenumber = None
self.samples = {}
def new(self, parameters):
"""Create and select a new sample."""
# In this simple base class, we expect the user to use only NewSample,
# so we remove stored sample information every time to avoid a buildup
# of unused sample information.
self.samples = {0: parameters}
self.select(0)
def set(self, number, parameters):
"""Set sample information for sample no. *number*."""
if number is None:
raise InvalidValueError(self, 'cannot use None as sample number')
info = self.samples.copy()
if number in info:
self.log.warning('overwriting parameters for sample %s (%s)',
number, info[number]['name'])
info[number] = parameters
self.samples = info
def select(self, number_or_name):
"""Select sample with given number or name."""
number = self._findIdent(number_or_name)
try:
parameters = self.samples[number]
except KeyError:
raise InvalidValueError(
self, 'cannot find sample with number or name %r' %
number_or_name) from None
self._applyParams(number, parameters)
session.experiment.newSample(parameters)
self.poll()
def _findIdent(self, number_or_name):
"""Find sample number. Can be overridden in subclasses."""
# look by number
if number_or_name in self.samples:
return number_or_name
# look by name
found = None
for (number, parameters) in self.samples.items():
if parameters['name'] == number_or_name:
if found is not None:
# two samples with same name found...
raise InvalidValueError(self, 'two samples with name %r '
'were found, please use the '
'sample number (%s or %s)' %
(number_or_name, found, number))
found = number
return found
def _applyParams(self, number, parameters):
"""Apply sample parameters. Override in subclasses.
All parameters beside the name should be treated as optional by
subclasses, since they will not be provided for the empty sample
created by NewExperiment.
"""
self.samplenumber = number
self.samplename = parameters['name']
self._setROParam('target', parameters['name'])
def doUpdateSamples(self, info):
self.valuetype = oneof(*(info[n]['name'] for n in sorted(info)))
|
py | 7df8f8c7ff44784c628557eedaa6ef961e41a1bf | #!/usr/bin/env python3
# encoding: utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:xxxxx@localhost:3306/trip?charset=utf8'
app.config['SECRET_KEY'] = 'session_key'
db = SQLAlchemy(app)
Bootstrap(app)
from app.views import *
|
py | 7df8f8f131db4bbddc3e57e4424aa3cfdec25e3a | import numpy as np
from LogisticRegression.Sigmoid import sigmoid
from LogisticRegression.Cost import cost
def optimize(X, y, lambdad, maxrun, alpha):
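    """Fit logistic regression weights by batch gradient descent with L2 regularization.
    Returns the learned weight vector and the per-iteration cost history.
    Illustrative usage (the shapes and hyperparameter values are assumptions,
    not taken from this project):
        X = np.random.rand(100, 3)
        y = (np.random.rand(100) > 0.5).astype(int)
        theta, costs = optimize(X, y, lambdad=1.0, maxrun=500, alpha=0.1)
    """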
m, n = X.shape
X = np.hstack((X,np.ones((m, 1))))
    theta = np.random.rand(n + 1)  # 1-D weight vector; the last entry weights the appended bias column
costs = []
for iter in range(0,maxrun):
first_dev = np.zeros(n+1)
for i in range(m):
h_x = sigmoid(np.dot(X[i,:], theta))
first_dev = first_dev + (h_x - y[i])*X[i,:]
first_dev[1:] = first_dev[1:] + lambdad * theta[1:]
first_dev = (alpha/m) * first_dev
theta_new = theta - first_dev
diff = theta_new - theta
theta = theta_new
costs.append(cost(X, y, theta, lambdad))
if np.dot(diff, diff) < 10**(-6):
break
if iter == maxrun - 1:
print("Failed to converge!")
    return theta, costs
|
py | 7df8f93fd610ddc3cc770c64d9aacd4f706a443b | from typing import List
class Solution:
def validMountainArray(self, A: List[int]) -> bool:
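        # Walk inward from the right edge: the first loop consumes the strictly
        # decreasing tail, the second the strictly increasing head; the array is
        # a valid mountain only if everything is consumed (i ends at 1).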
if len(A) < 3 or A[0] >= A[1] or A[-2] <= A[-1]: return False
i = len(A)
while i > 1 and A[i - 1] < A[i - 2]: i -= 1
while i > 1 and A[i - 2] < A[i - 1]: i -= 1
return i == 1
|
py | 7df8fa65eed63c1f029ecef588ce06563ae9d192 | # flake8: noqa
from allegro.search.crawler import search, crawl
from allegro.search.product import Product, parse_products
|
py | 7df8fc299618b37b8612d40d3114e8a8b0b2d983 | import nltk
def process_input(api_desc):
'''
    api_desc: a description of some data capture needs, for instance:
'I would like to record the books borrowed by different users at all the libraries'
'''
tokenized = nltk.word_tokenize(api_desc)
nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if(pos[:2] == 'NN')]
return nouns
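# Illustrative call (the exact output depends on the installed NLTK tagger model):
#   process_input('record the books borrowed by users at the libraries')
#   would typically return nouns such as ['books', 'users', 'libraries'].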
def main():
api_desc = input("Enter a description of the data you wish to use :")
print("")
if api_desc.strip() == "":
raise Exception("Description must be input")
else:
nouns=process_input(api_desc)
if nouns:
print(f'''Nouns found in text input : {api_desc}\n''')
for n in nouns:
print(n)
else:
print(f'''No nouns found in text input : {api_desc}''')
print("")
if __name__ == '''__main__''':
main()
|
py | 7df8fd0d5717bde723dd16fb58f07eaf6535acc5 | import yaml
import logging
log = logging.getLogger(__name__)
with open("config.yaml", "rb") as stream:
log.info("load config from config.yaml")
    CONFIG = yaml.safe_load(stream)  # safe_load avoids constructing arbitrary Python objects
|
py | 7df8fd68f62cfb91ef88bd7b66e39ead92642c96 | import tornado
import tornado.websocket
from datetime import timedelta
import datetime, sys, time
from pprint import pprint
import json
import serial.tools.list_ports
import serial
clients = []
arduino_name = "SNR=95333353037351D071B0"
ports = serial.tools.list_ports.comports()
print(ports)
arduino_port = ""
for p in ports:
print (p)
if p[0] != "":
print ("This is an Arduino!")
print (p)
arduino_port = p[0]
if arduino_port == "":
print ("Arduino not found")
sys.exit("where is it?")
# Establish the connection on a specific port
ser = serial.Serial(arduino_port, 115200) #9600)
class WebSocketHandler(tornado.websocket.WebSocketHandler):
tt = datetime.datetime.now()
def check_origin(self, origin):
#print "origin: " + origin
return True
# the client connected
def open(self):
print ("New client connected")
self.write_message("You are connected")
clients.append(self)
tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1), self.test)
def test(self):
try:
info = {}
try:
text = ser.readline() # Read the newest output from the Arduino
print("TEXT: ")
print(text)
values = str(text).split(".")
"""
info = {
"luz" : float(values[1]),
"humedad" : float(values[2]),
"temperatura" : float(values[3]),
"tierra" : float(values[0]),
"estado" : int(values[3]),
"timestamp" : time.time()
}
"""
info = {
"luz" : float(values[0]),
"humedad" : float(values[1]),
"temperatura" : float(values[2]),
#"tierra" : float(values[0]),
"estado" : int(values[3]),
"timestamp" : time.time()
}
                print(info)
except Exception as e:
print("EXCEPTION IN TEST READ FROM READ SERIAL: ")
print(e)
#print(info)
info = {
"humedad" : float("0.0"),
"temperatura" : float("0.0"),
"tierra" : float("0.0"),
"estado" : -1,
"timestamp" : time.time()
}
#raise(e)
print(info)
self.write_message(info)
except Exception as e:
print ("restartplease")
self.write_message("restartplease")
            print(e)
#raise(e)
else:
tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=0.1), self.test)
# the client sent the message
def on_message(self, message):
print ("message: " + message)
try:
message = json.loads(message)
#pprint(message)
except Exception as e:
print ("cant send value to arduino")
#raise(e)
#self.write_message(message)
# client disconnected
def on_close(self):
print ("Client disconnected")
clients.remove(self)
socket = tornado.web.Application([(r"/websocket", WebSocketHandler),])
if __name__ == "__main__":
print("Trying to read data from serial: ")
text = ser.readline()
print("Data from serial: ")
print(text)
values = str(text).split(".")
    print(values)
print("Opening port 8888")
socket.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
py | 7df8fe1e403e9f04652faf6aba76265b56f83070 | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.friends.RelationshipChooser
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.task.Task import Task
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPLocalizer
from otp.otpbase import OTPGlobals
from otp.uberdog.RejectCode import RejectCode
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import GuiPanel
from pirates.piratesgui import PiratesGuiGlobals
from pirates.battle.DistributedBattleNPC import DistributedBattleNPC
from pirates.friends import PirateFriendSecret
from pirates.piratesgui import PirateButtonChain
class RelationshipChooser(GuiPanel.GuiPanel):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('FriendInviter')
def __init__(self, avId, avName, pId=None):
self.avId = avId
self.avName = avName
self.pId = pId
self.avDisableName = 'disable-%s' % avId
        if not self.pId and self.avId:
self.pId = base.cr.playerFriendsManager.findPlayerIdFromAvId(self.avId)
        if not self.avId and self.pId:
self.avId = base.cr.playerFriendsManager.findAvIdFromPlayerId(self.pId)
GuiPanel.GuiPanel.__init__(self, PLocalizer.RelationshipChooserTitle % avName, 0.5, 0.25, True, 1)
self.initialiseoptions(RelationshipChooser)
self.setPos(0.15, 0, 0.25)
self.chain = PirateButtonChain.PirateButtonChain(self.width, self)
self.chain.setPos(0, 0, -0.03)
self.load()
self.determineButtonState()
def load(self):
self.avFriendButton = self.chain.premakeButton(PLocalizer.RelationshipChooserAvFriendsMake, self.__handleAvatarFriend)
self.plFriendButton = self.chain.premakeButton(PLocalizer.RelationshipChooserPlFriendsMake, self.__handlePlayerFriend)
self.secretsButton = self.chain.premakeButton(PLocalizer.RelationshipChooserPlSecrets, self.__handleSecrets)
self.chain.makeButtons()
def destroy(self):
if hasattr(self, 'destroyed'):
return
self.destroyed = 1
self.chain.destroy()
GuiPanel.GuiPanel.destroy(self)
def __handleSecrets(self):
PirateFriendSecret.showFriendSecret()
self.destroy()
def __handleAvatarFriend(self):
base.localAvatar.guiMgr.handleAvatarFriendInvite(self.avId, self.avName)
self.destroy()
def __handlePlayerFriend(self):
base.localAvatar.guiMgr.handlePlayerFriendInvite(self.avId, self.avName)
self.destroy()
def determineButtonState(self):
isPlayerFriend = base.cr.playerFriendsManager.isPlayerFriend(self.pId)
isAvatarFriend = base.cr.avatarFriendsManager.isAvatarFriend(self.avId)
if isPlayerFriend:
self.plFriendButton['text'] = PLocalizer.RelationshipChooserPlFriendsBreak
else:
self.plFriendButton['text'] = PLocalizer.RelationshipChooserPlFriendsMake
if isAvatarFriend:
self.avFriendButton['text'] = PLocalizer.RelationshipChooserAvFriendsBreak
else:
self.avFriendButton['text'] = PLocalizer.RelationshipChooserAvFriendsMake
self.avFriendButton['state'] = DGG.DISABLED
self.plFriendButton['state'] = DGG.DISABLED
self.secretsButton['state'] = DGG.NORMAL
if self.avId or self.pId:
av = base.cr.doId2do.get(self.avId)
print 'avId %s av %s' % (self.avId, av)
if av:
if av.commonChatFlags & base.localAvatar.commonChatFlags & OTPGlobals.CommonChat:
self.plFriendButton['state'] = DGG.NORMAL
self.avFriendButton['state'] = DGG.NORMAL
if base.cr.avatarFriendsManager.checkIgnored(self.avId):
self.avFriendButton['state'] = DGG.DISABLED
self.plFriendButton['state'] = DGG.DISABLED
self.secretsButton['state'] = DGG.DISABLED
if isPlayerFriend:
self.plFriendButton['state'] = DGG.NORMAL
if isAvatarFriend:
self.avFriendButton['state'] = DGG.NORMAL |
py | 7df8ff3ef6b83ad86b15b248c5b5444e35544315 | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for pivx utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, pivx-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
|
py | 7df8ff62de43fae2f133b58d98329548b41e77ca | from ..colors import WHITE_ON_BLACK
from .widget import Widget
from .widget_data_structures import Point, Size, Rect
class ParticleField(Widget):
"""
A widget that only has `Particle` children.
Notes
-----
ParticleFields are an optimized way to render many 1x1 TUI elements.
Raises
------
TypeError if `add_widget` argument is not an instance of `Particle`.
"""
def __init__(self, dim=Size(10, 10), pos=Point(0, 0), *, is_visible=True):
self._dim = dim
self.top, self.left = pos
self.is_visible = is_visible
self.parent = None
self.children = [ ]
def resize(self, dim: Size):
self._dim = dim
for child in self.children:
child.update_geometry()
def add_text(self, text, row=0, column=0):
        raise NotImplementedError
@property
def get_view(self):
        raise NotImplementedError
def add_widget(self, widget):
if not isinstance(widget, Particle):
raise TypeError(f"expected Particle, got {type(widget).__name__}")
super().add_widget(widget)
def walk(self):
yield self
yield from self.children
def render(self, canvas_view, colors_view, rect: Rect):
"""
Paint region given by rect into canvas_view and colors_view.
"""
t, l, _, _, h, w = rect
for child in self.children:
pos = top, left = child.top - t, child.left - l
if (
child.is_visible
and not (child.is_transparent and child.char == " ")
and 0 <= top < h
and 0 <= left < w
):
canvas_view[pos] = child.char
colors_view[pos] = child.color
def dispatch_press(self, key_press):
"""
Dispatch key press to children.
"""
# Note this dispatching is in reverse order from widget base.
return (
self.on_press(key_press)
or any(particle.on_press(key_press) for particle in reversed(self.children))
)
def dispatch_click(self, mouse_event):
"""
Dispatch mouse event to children.
"""
# Note this dispatching is in reverse order from widget base.
return (
self.on_click(mouse_event)
or any(particle.on_click(mouse_event) for particle in reversed(self.children))
)
class Particle:
"""
A 1x1 TUI element that's Widget-like, except it has no render method.
Particles require a `ParticleField` parent to be rendered.
"""
def __init__(
self,
pos=Point(0, 0),
*,
char=" ",
color=WHITE_ON_BLACK,
is_transparent=False,
is_visible=True,
):
self.char = char
self.color = color
self.top, self.left = pos
self.is_transparent = is_transparent
self.is_visible = is_visible
self.parent = None
def update_geometry(self):
"""
Update geometry due to a change in parent's size.
"""
@property
def dim(self):
return 1, 1
@property
def pos(self):
return self.top, self.left
@property
def height(self):
return 1
@property
def width(self):
return 1
@property
def bottom(self):
return self.top + 1
@property
def right(self):
return self.left + 1
def absolute_to_relative_coords(self, coords):
"""
Convert absolute coordinates to relative coordinates.
"""
y, x = self.parent.absolute_to_relative_coords(coords)
return y - self.top, x - self.left
def on_press(self, key_press):
"""
Handle key press. (Handled key presses should return True else False or None).
Notes
-----
`key_press` is a `prompt_toolkit` `KeyPress`.
"""
def on_click(self, mouse_event):
"""
Handle mouse event. (Handled mouse events should return True else False or None).
Notes
-----
        `mouse_event` is a `prompt_toolkit` `MouseEvent`.
"""
|
py | 7df8ffe2ce303737b32533988a8f9132d48e546c | import sys
import re
from numpy.distutils.fcompiler import FCompiler
compilers = ['NAGFCompiler', 'NAGFORCompiler']
class BaseNAGFCompiler(FCompiler):
version_pattern = r'NAG.* Release (?P<version>[^(\s]*)'
def version_match(self, version_string):
m = re.search(self.version_pattern, version_string)
if m:
return m.group('version')
else:
return None
def get_flags_linker_so(self):
return ["-Wl,-shared"]
def get_flags_opt(self):
return ['-O4']
def get_flags_arch(self):
return []
class NAGFCompiler(BaseNAGFCompiler):
compiler_type = 'nag'
description = 'NAGWare Fortran 95 Compiler'
executables = {
'version_cmd' : ["<F90>", "-V"],
'compiler_f77' : ["f95", "-fixed"],
'compiler_fix' : ["f95", "-fixed"],
'compiler_f90' : ["f95"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_flags_linker_so(self):
if sys.platform == 'darwin':
return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
return BaseNAGFCompiler.get_flags_linker_so(self)
def get_flags_arch(self):
version = self.get_version()
if version and version < '5.1':
return ['-target=native']
else:
return BaseNAGFCompiler.get_flags_arch(self)
def get_flags_debug(self):
return ['-g', '-gline', '-g90', '-nan', '-C']
class NAGFORCompiler(BaseNAGFCompiler):
compiler_type = 'nagfor'
description = 'NAG Fortran Compiler'
executables = {
'version_cmd' : ["nagfor", "-V"],
'compiler_f77' : ["nagfor", "-fixed"],
'compiler_fix' : ["nagfor", "-fixed"],
'compiler_f90' : ["nagfor"],
'linker_so' : ["nagfor"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_flags_debug(self):
version = self.get_version()
if version and version > '6.1':
return ['-g', '-u', '-nan', '-C=all', '-thread_safe',
'-kind=unique', '-Warn=allocation', '-Warn=subnormal']
else:
return ['-g', '-nan', '-C=all', '-u', '-thread_safe']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
compiler = customized_fcompiler(compiler='nagfor')
print(compiler.get_version())
print(compiler.get_flags_debug())
|
py | 7df90026b2e74127e65e3a16b907ff7380b156b8 | # -*- coding: utf-8 -*-
"""
Created on 2018/10/31
@author: gaoan
"""
class Bar(object):
def __init__(self):
self.time = None
self.open = None
self.high = None
self.low = None
self.close = None
self.volume = None
def __repr__(self):
"""
String representation for this object.
"""
return "Bar(%s)" % self.__dict__
|
py | 7df9007c41d629746c49756d66a1959caf8946f4 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "socialtenant.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | 7df90083bc9213f855c4eaaa79adbcd7699db3a5 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "updown.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | 7df90105f4d760f9f10a7ca69d420a71922450c4 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api import compute
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils.data_utils import parse_image_id
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
class ImagesOneServerTestJSON(base.BaseComputeTest):
_interface = 'json'
def tearDown(self):
"""Terminate test instances created after a test is executed."""
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
super(ImagesOneServerTestJSON, self).tearDown()
@classmethod
def setUpClass(cls):
super(ImagesOneServerTestJSON, cls).setUpClass()
cls.client = cls.images_client
if not cls.config.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
try:
resp, cls.server = cls.create_server(wait_until='ACTIVE')
except Exception:
cls.tearDownClass()
raise
cls.image_ids = []
if compute.MULTI_USER:
if cls.config.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.images_client
@testtools.skip("Skipped until the Bug #1006725 is resolved.")
@attr(type=['negative', 'gate'])
def test_create_image_specify_multibyte_character_image_name(self):
# Return an error if the image name has multi-byte characters
snapshot_name = rand_name('\xef\xbb\xbf')
self.assertRaises(exceptions.BadRequest,
self.client.create_image, self.server['id'],
snapshot_name)
@attr(type=['negative', 'gate'])
def test_create_image_specify_invalid_metadata(self):
# Return an error when creating image with invalid metadata
snapshot_name = rand_name('test-snap-')
meta = {'': ''}
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server['id'], snapshot_name, meta)
@attr(type=['negative', 'gate'])
def test_create_image_specify_metadata_over_limits(self):
# Return an error when creating image with meta data over 256 chars
snapshot_name = rand_name('test-snap-')
meta = {'a' * 260: 'b' * 260}
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server['id'], snapshot_name, meta)
def _get_default_flavor_disk_size(self, flavor_id):
resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
return flavor['disk']
@testtools.skipUnless(compute.CREATE_IMAGE_ENABLED,
'Environment unable to create images.')
@attr(type='smoke')
def test_create_delete_image(self):
# Create a new image
name = rand_name('image')
meta = {'image_type': 'test'}
resp, body = self.client.create_image(self.server['id'], name, meta)
self.assertEqual(202, resp.status)
image_id = parse_image_id(resp['location'])
self.client.wait_for_image_status(image_id, 'ACTIVE')
# Verify the image was created correctly
resp, image = self.client.get_image(image_id)
self.assertEqual(name, image['name'])
self.assertEqual('test', image['metadata']['image_type'])
resp, original_image = self.client.get_image(self.image_ref)
# Verify minRAM is the same as the original image
self.assertEqual(image['minRam'], original_image['minRam'])
# Verify minDisk is the same as the original image or the flavor size
flavor_disk_size = self._get_default_flavor_disk_size(self.flavor_ref)
self.assertIn(str(image['minDisk']),
(str(original_image['minDisk']), str(flavor_disk_size)))
# Verify the image was deleted correctly
resp, body = self.client.delete_image(image_id)
self.assertEqual('204', resp['status'])
self.client.wait_for_resource_deletion(image_id)
@attr(type=['negative', 'gate'])
def test_create_second_image_when_first_image_is_being_saved(self):
# Disallow creating another image when first image is being saved
# Create first snapshot
snapshot_name = rand_name('test-snap-')
resp, body = self.client.create_image(self.server['id'],
snapshot_name)
self.assertEqual(202, resp.status)
image_id = parse_image_id(resp['location'])
self.image_ids.append(image_id)
# Create second snapshot
alt_snapshot_name = rand_name('test-snap-')
self.assertRaises(exceptions.Duplicate, self.client.create_image,
self.server['id'], alt_snapshot_name)
self.client.wait_for_image_status(image_id, 'ACTIVE')
@attr(type=['negative', 'gate'])
def test_create_image_specify_name_over_256_chars(self):
# Return an error if snapshot name over 256 characters is passed
snapshot_name = rand_name('a' * 260)
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server['id'], snapshot_name)
@attr(type=['negative', 'gate'])
def test_delete_image_that_is_not_yet_active(self):
        # Return an error while trying to delete an image that is still being created
snapshot_name = rand_name('test-snap-')
resp, body = self.client.create_image(self.server['id'], snapshot_name)
self.assertEqual(202, resp.status)
image_id = parse_image_id(resp['location'])
self.image_ids.append(image_id)
# Do not wait, attempt to delete the image, ensure it's successful
resp, body = self.client.delete_image(image_id)
self.assertEqual('204', resp['status'])
self.image_ids.remove(image_id)
self.assertRaises(exceptions.NotFound, self.client.get_image, image_id)
class ImagesOneServerTestXML(ImagesOneServerTestJSON):
_interface = 'xml'
|
py | 7df901a5ce97f1403f552425fa166517135e4a78 | import numpy as np
import pandas as pd
import pytest
from rulelist.search.iterative_rule_search import _fit_rulelist
@pytest.fixture
def constant_parameters():
input_n_cutpoints = 5
input_discretization = "static"
input_target_model = "gaussian"
input_max_depth = 5
input_beam_width = 10
input_iterative_beam_width = 1
input_task = "discovery"
input_max_rules= 10
input_alpha_gain = 1
yield input_n_cutpoints, input_discretization, input_target_model,input_max_depth, input_beam_width,\
input_iterative_beam_width, input_task, input_max_rules, input_alpha_gain
@pytest.fixture
def generate_input_dataframe_two_target_normal(constant_parameters):
input_n_cutpoints, input_discretization, input_target_model, input_max_depth, input_beam_width, \
input_iterative_beam_width, input_task, input_max_rules, input_alpha_gain = constant_parameters
dictinput = {"attribute1": np.arange(100000),
"attribute2": np.array(["below1000" if i < 1000 else "above999" for i in range(100000)])}
input_input_data = pd.DataFrame(data=dictinput)
dictoutput = {"target1": np.concatenate((np.random.normal(loc=20,scale=3,size=16666),
np.random.normal(loc=100,scale=6,size=83334)), axis=None),
"target2": np.concatenate((np.random.normal(loc=10,scale=2,size=16666),
np.random.normal(loc=50,scale=5,size=83334)), axis=None)}
input_output_data = pd.DataFrame(data=dictoutput)
yield input_input_data, input_output_data
class TestFitRuleList:
def test_start(self,constant_parameters,generate_input_dataframe_two_target_normal):
input_n_cutpoints, input_discretization, input_target_model, input_max_depth, input_beam_width, \
input_iterative_beam_width, input_task, input_max_rules, input_alpha_gain = constant_parameters
input_input_data, input_output_data = generate_input_dataframe_two_target_normal
output_rulelist = _fit_rulelist(input_input_data, input_output_data, input_target_model, input_max_depth,
input_beam_width, input_iterative_beam_width,input_n_cutpoints, input_task,
input_discretization, input_max_rules, input_alpha_gain)
|
py | 7df901f7d73675f6e9170af47b5e121d41ee13d4 | # %%
# This cell is the unit test for Product. OFFICIAL!
#### For importing files in the repo
import sys
sys.path.insert(0, '..//ocado_scraper')
from product import Product
from selenium import webdriver
from selenium.webdriver.common.by import By
import unittest
from OcadoScraper import OcadoScraper
import os
class ProductTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.chrome_options = webdriver.ChromeOptions()
cls.chrome_options.add_argument("--start-maximized")
cls.chrome_options.add_argument("--headless")
cls.driver = webdriver.Chrome(options=cls.chrome_options)
cls.driver.maximize_window()
with open('product_urls_for_testing', 'r') as f:
lines = f.readlines()
urls_list = []
for line in lines:
urls_list.append(line)
cls.info_list = []
cls.sku_list = []
for i, url in enumerate(urls_list):
print(f'{i/70*100}% done')
product = Product(url)
product.download_images
info = product.scrape_product_data(cls.driver, product.download_images)
cls.info_list.append(info)
cls.sku_list.append(product.get_sku())
if i == 5:
break
def setUp(self):
pass
def test_scrape_product_data(self):
'''
        Tests scrape_product_data to find any missing information and counts the missing values per field.
'''
print('test_scrape_product_data')
missing_keys = {key : 0 for key in self.info_list[0].keys()}
for dict in self.info_list:
counter = 0
for key, value in dict.items():
if value == None:
missing_keys[key] += 1
print(dict['URL'])
                    print(f'{dict["Name"]} has a missing value at {key}\n')
elif value != None:
counter += 1
self.assertGreaterEqual(counter, 1)
print(missing_keys)
def test_get_sku(self):
'''
        Tests get_sku to check that each sku is a non-empty string of digits.
'''
for sku in self.sku_list:
print('test_get_sku')
self.assertGreaterEqual(len(sku), 1)
get_sku_int = int(sku)
self.assertIsInstance(sku, str)
self.assertIsInstance(get_sku_int, int)
def test_download_images(self):
'''
Tests to see if path to image file exists.
'''
product = Product('https://www.ocado.com/products/clearwipe-microfibre-lens-wipes-544774011')
product.download_images
product.scrape_product_data(self.driver, product.download_images)
path_pwd = os.path.abspath(os.getcwd())
# image_sku = url.split('-')[-1]
image_path = os.path.isdir(f'{path_pwd}/data/images/544774011')
self.assertTrue(image_path)
def tearDown(self):
pass
# print('tearDown')
# del self.product
# OcadoScraper.delete_downloaded_images()
@classmethod
def tearDownClass(cls):
print('tearDown')
# del cls.product
OcadoScraper.delete_downloaded_images()
unittest.main(argv=[''], verbosity=2, exit=False) |
py | 7df903250da7e46bb68f157c8f947f3c8ac6d6e0 | from typing import Dict
import blspy
from tad.full_node.bundle_tools import simple_solution_generator
from tad.types.blockchain_format.coin import Coin
from tad.types.blockchain_format.program import Program
from tad.types.coin_solution import CoinSolution
from tad.types.condition_opcodes import ConditionOpcode
from tad.types.generator_types import BlockGenerator
from tad.types.spend_bundle import SpendBundle
from tad.util.ints import uint64
from tad.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk, solution_for_conditions
GROUP_ORDER = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001
def int_to_public_key(index: int) -> blspy.G1Element:
index = index % GROUP_ORDER
private_key_from_int = blspy.PrivateKey.from_bytes(index.to_bytes(32, "big"))
return private_key_from_int.get_g1()
def puzzle_hash_for_index(index: int, puzzle_hash_db: dict) -> bytes:
public_key = bytes(int_to_public_key(index))
puzzle = puzzle_for_pk(public_key)
puzzle_hash = puzzle.get_tree_hash()
puzzle_hash_db[puzzle_hash] = puzzle
return puzzle_hash
def make_fake_coin(index: int, puzzle_hash_db: dict) -> Coin:
"""
Make a fake coin with parent id equal to the index (ie. a genesis block coin)
"""
parent = index.to_bytes(32, "big")
puzzle_hash = puzzle_hash_for_index(index, puzzle_hash_db)
amount = 100000
return Coin(parent, puzzle_hash, uint64(amount))
def conditions_for_payment(coin) -> Program:
d: Dict = {} # a throwaway db since we don't care
new_puzzle_hash = puzzle_hash_for_index(int.from_bytes(coin.puzzle_hash, "big"), d)
return Program.to([[ConditionOpcode.CREATE_COIN, new_puzzle_hash, coin.amount]])
def make_spend_bundle(count: int) -> SpendBundle:
puzzle_hash_db: Dict = dict()
coins = [make_fake_coin(_, puzzle_hash_db) for _ in range(count)]
coin_solutions = []
for coin in coins:
puzzle_reveal = puzzle_hash_db[coin.puzzle_hash]
conditions = conditions_for_payment(coin)
solution = solution_for_conditions(conditions)
coin_solution = CoinSolution(coin, puzzle_reveal, solution)
coin_solutions.append(coin_solution)
spend_bundle = SpendBundle(coin_solutions, blspy.G2Element())
return spend_bundle
def make_block_generator(count: int) -> BlockGenerator:
spend_bundle = make_spend_bundle(count)
return simple_solution_generator(spend_bundle)
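# Usage sketch (assumes a working `tad` environment; not part of the original file):
#   bundle = make_spend_bundle(3)         # spends three synthetic genesis coins
#   generator = make_block_generator(3)   # the same bundle wrapped as a BlockGenerator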
|
py | 7df904f7721d541f9325f74ae573c1e39694902b | from typing import Union, Callable, Generic, Tuple, List, Optional
import numpy as np
import ray
from ray.util.annotations import PublicAPI
from ray.data.dataset import Dataset
from ray.data.impl import sort
from ray.data.aggregate import AggregateFn, Count, Sum, Max, Min, \
Mean, Std, AggregateOnT
from ray.data.impl.block_list import BlockList
from ray.data.impl.remote_fn import cached_remote_fn
from ray.data.impl.progress_bar import ProgressBar
from ray.data.block import Block, BlockAccessor, BlockMetadata, \
T, U, KeyType
GroupKeyBaseT = Union[Callable[[T], KeyType], str]
GroupKeyT = Optional[Union[GroupKeyBaseT, List[GroupKeyBaseT]]]
AggregateOnTs = Union[AggregateOnT, List[AggregateOnT]]
@PublicAPI(stability="beta")
class GroupedDataset(Generic[T]):
"""Represents a grouped dataset created by calling ``Dataset.groupby()``.
The actual groupby is deferred until an aggregation is applied.
"""
def __init__(self, dataset: Dataset[T], key: GroupKeyT):
"""Construct a dataset grouped by key (internal API).
The constructor is not part of the GroupedDataset API.
Use the ``Dataset.groupby()`` method to construct one.
"""
self._dataset = dataset
if isinstance(key, list):
if len(key) > 1:
# TODO(jjyao) Support multi-key groupby.
raise NotImplementedError(
"Multi-key groupby is not supported yet")
else:
self._key = key[0]
else:
self._key = key
def aggregate(self, *aggs: AggregateFn) -> Dataset[U]:
"""Implements an accumulator-based aggregation.
This is a blocking operation.
Examples:
>>> grouped_ds.aggregate(AggregateFn(
... init=lambda k: [],
... accumulate=lambda a, r: a + [r],
... merge=lambda a1, a2: a1 + a2,
... finalize=lambda a: a
... ))
Args:
aggs: Aggregations to do.
Returns:
If the input dataset is simple dataset then the output is a simple
dataset of ``(k, v_1, ..., v_n)`` tuples where ``k`` is the groupby
key and ``v_i`` is the result of the ith given aggregation.
If the input dataset is an Arrow dataset then the output is an
Arrow dataset of ``n + 1`` columns where the first column is the
groupby key and the second through ``n + 1`` columns are the
results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
if len(aggs) == 0:
raise ValueError("Aggregate requires at least one aggregation")
# Handle empty dataset.
if self._dataset.num_blocks() == 0:
return self._dataset
blocks = list(self._dataset._blocks.iter_blocks())
num_mappers = len(blocks)
num_reducers = num_mappers
if self._key is None:
num_reducers = 1
boundaries = []
else:
boundaries = sort.sample_boundaries(
blocks, [(self._key, "ascending")]
if isinstance(self._key, str) else self._key, num_reducers)
partition_and_combine_block = cached_remote_fn(
_partition_and_combine_block).options(num_returns=num_reducers)
aggregate_combined_blocks = cached_remote_fn(
_aggregate_combined_blocks, num_returns=2)
map_results = np.empty((num_mappers, num_reducers), dtype=object)
for i, block in enumerate(blocks):
map_results[i, :] = partition_and_combine_block.remote(
block, boundaries, self._key, aggs)
map_bar = ProgressBar("GroupBy Map", len(map_results))
map_bar.block_until_complete([ret[0] for ret in map_results])
map_bar.close()
blocks = []
metadata = []
for j in range(num_reducers):
block, meta = aggregate_combined_blocks.remote(
num_reducers, self._key, aggs, *map_results[:, j].tolist())
blocks.append(block)
metadata.append(meta)
reduce_bar = ProgressBar("GroupBy Reduce", len(blocks))
reduce_bar.block_until_complete(blocks)
reduce_bar.close()
metadata = ray.get(metadata)
return Dataset(BlockList(blocks, metadata), self._dataset._epoch)
def _aggregate_on(self, agg_cls: type, on: Optional[AggregateOnTs], *args,
**kwargs):
"""Helper for aggregating on a particular subset of the dataset.
This validates the `on` argument, and converts a list of column names
or lambdas to a multi-aggregation. A null `on` results in a
multi-aggregation on all columns for an Arrow Dataset, and a single
aggregation on the entire row for a simple Dataset.
"""
aggs = self._dataset._build_multicolumn_aggs(
agg_cls, on, *args, skip_cols=self._key, **kwargs)
return self.aggregate(*aggs)
def count(self) -> Dataset[U]:
"""Compute count aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).count()
>>> ray.data.from_items([
... {"A": x % 3, "B": x} for x in range(100)]).groupby(
... "A").count()
Returns:
A simple dataset of ``(k, v)`` pairs or an Arrow dataset of
``[k, v]`` columns where ``k`` is the groupby key and ``v`` is the
number of rows with that key.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self.aggregate(Count())
def sum(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped sum aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).sum()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .sum(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").sum()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .sum(["B", "C"])
Args:
on: The data subset on which to compute the sum.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a sum of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise sum of all
columns.
Returns:
The sum result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, sum)`` tuples where ``k``
is the groupby key and ``sum`` is sum of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, sum_1, ..., sum_n)`` tuples where ``k`` is the groupby key
and ``sum_i`` is sum of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise sum column for each original column
in the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Sum, on)
def min(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped min aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).min()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .min(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").min()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .min(["B", "C"])
Args:
on: The data subset on which to compute the min.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a min of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise min of all
columns.
Returns:
The min result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, min)`` tuples where ``k``
is the groupby key and min is min of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, min_1, ..., min_n)`` tuples where ``k`` is the groupby key
and ``min_i`` is min of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise min column for each original column in
the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Min, on)
def max(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped max aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).max()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .max(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").max()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .max(["B", "C"])
Args:
on: The data subset on which to compute the max.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a max of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise max of all
columns.
Returns:
The max result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, max)`` tuples where ``k``
is the groupby key and ``max`` is max of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, max_1, ..., max_n)`` tuples where ``k`` is the groupby key
and ``max_i`` is max of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise max column for each original column in
the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Max, on)
def mean(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped mean aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).mean()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .mean(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").mean()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .mean(["B", "C"])
Args:
on: The data subset on which to compute the mean.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a mean of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise mean of all
columns.
Returns:
The mean result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, mean)`` tuples where ``k``
is the groupby key and ``mean`` is mean of all rows in that
group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, mean_1, ..., mean_n)`` tuples where ``k`` is the groupby
key and ``mean_i`` is mean of the outputs of the ith callable
called on each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise mean column for each original column
in the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Mean, on)
def std(self, on: Optional[AggregateOnTs] = None,
ddof: int = 1) -> Dataset[U]:
"""Compute grouped standard deviation aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).std()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .std(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").std(ddof=0)
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .std(["B", "C"])
NOTE: This uses Welford's online method for an accumulator-style
computation of the standard deviation. This method was chosen due to
        its numerical stability, and it being computable in a single pass.
This may give different (but more accurate) results than NumPy, Pandas,
and sklearn, which use a less numerically stable two-pass algorithm.
See
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
Args:
on: The data subset on which to compute the std.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a std of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise std of all
columns.
ddof: Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
Returns:
The standard deviation result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, std)`` tuples where ``k``
is the groupby key and ``std`` is std of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, std_1, ..., std_n)`` tuples where ``k`` is the groupby key
and ``std_i`` is std of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise std column for each original column in
the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Std, on, ddof=ddof)
def _partition_and_combine_block(block: Block[T], boundaries: List[KeyType],
key: GroupKeyT,
aggs: Tuple[AggregateFn]) -> List[Block]:
"""Partition the block and combine rows with the same key."""
if key is None:
partitions = [block]
else:
partitions = BlockAccessor.for_block(block).sort_and_partition(
boundaries, [(key, "ascending")] if isinstance(key, str) else key,
descending=False)
return [BlockAccessor.for_block(p).combine(key, aggs) for p in partitions]
def _aggregate_combined_blocks(
num_reducers: int, key: GroupKeyT, aggs: Tuple[AggregateFn],
*blocks: Tuple[Block, ...]) -> Tuple[Block[U], BlockMetadata]:
"""Aggregate sorted and partially combined blocks."""
if num_reducers == 1:
blocks = [b[0] for b in blocks] # Ray weirdness
return BlockAccessor.for_block(blocks[0]).aggregate_combined_blocks(
list(blocks), key, aggs)
|
py | 7df9056461fc63c86143377720f8bcd7e0ef61f9 | import pytest
from starkware.starknet.testing.starknet import Starknet
from utils import TestSigner, contract_path
signer = TestSigner(123456789987654321)
@pytest.fixture(scope='module')
async def ownable_factory():
starknet = await Starknet.empty()
owner = await starknet.deploy(
contract_path("openzeppelin/account/Account.cairo"),
constructor_calldata=[signer.public_key]
)
ownable = await starknet.deploy(
contract_path("tests/mocks/Ownable.cairo"),
constructor_calldata=[owner.contract_address]
)
return starknet, ownable, owner
@pytest.mark.asyncio
async def test_constructor(ownable_factory):
_, ownable, owner = ownable_factory
expected = await ownable.get_owner().call()
assert expected.result.owner == owner.contract_address
@pytest.mark.asyncio
async def test_transfer_ownership(ownable_factory):
_, ownable, owner = ownable_factory
new_owner = 123
await signer.send_transaction(owner, ownable.contract_address, 'transfer_ownership', [new_owner])
executed_info = await ownable.get_owner().call()
assert executed_info.result == (new_owner,)
|
py | 7df906030122f734b37658b702c77d81fc594610 | #@+leo-ver=5-thin
#@+node:ekr.20040331151007: * @file ../plugins/niceNosent.py
#@+<< docstring >>
#@+node:ekr.20101112180523.5420: ** << docstring >>
""" Ensures that all descendants of @file-nosent nodes end
with exactly one newline, replaces all tabs with spaces, and
adds a newline before class and functions in the derived file.
"""
#@-<< docstring >>
#@@language python
#@@tabwidth -4
__version__ = "0.3"
#@+<< version history >>
#@+node:ekr.20040909122647: ** << version history >>
#@+at
#
# 0.2 EKR:
# - Use isAtNoSentinelsFileNode and atNoSentinelsFileNodeName.
# - Use g.os_path_x methods for better unicode support.
# 0.3 EKR:
# - Converted to 4.2 code base:
# - Use keywords.get('c') instead of g.top().
# - Use explicit positions everywhere.
# - removed reference to new_df.
#@-<< version history >>
#@+<< imports >>
#@+node:ekr.20040909122647.1: ** << imports >>
import leo.core.leoGlobals as g
#@-<< imports >>
NSPACES = ' '*4
nosentNodes = []
#@+others
#@+node:ekr.20050917082031: ** init
def init ():
'''Return True if the plugin has loaded successfully.'''
ok = not g.unitTesting
if ok:
g.registerHandler("save1",onPreSave)
g.registerHandler("save2",onPostSave)
g.plugin_signon(__name__)
return ok
#@+node:ekr.20040331151007.1: ** onPreSave
def onPreSave(tag=None, keywords=None):
"""Before saving an @nosent file, make sure that all nodes have a blank line at the end."""
global nosentNodes
c = keywords.get('c')
if c:
for p in c.all_positions():
if p.isAtNoSentinelsFileNode() and p.isDirty():
nosentNodes.append(p.copy())
for p2 in p.self_and_subtree():
s = p2.b
lastline = s.split("\n")[-1]
if lastline.strip():
c.setBodyString(p2,s+"\n")
#@+node:ekr.20040331151007.2: ** onPostSave
def onPostSave(tag=None, keywords=None):
"""After saving an @nosent file, replace all tabs with spaces."""
global nosentNodes
c = keywords.get('c')
if c:
at = c.atFileCommands
for p in nosentNodes:
g.red("node %s found" % p.h)
at.scanAllDirectives(p)
name = p.atNoSentinelsFileNodeName()
fname = g.os_path_join(at.default_directory,name)
f = open(fname,"r")
lines = f.readlines()
f.close()
#@+<< add a newline before def or class >>
#@+node:ekr.20040331151007.3: *3* << add a newline before def or class >>
for i, s in enumerate(lines):
ls = s.lstrip()
if ls.startswith("def ") or ls.startswith("class "):
try:
if lines[i-1].strip() != "":
lines[i] = "\n" + lines[i]
except IndexError:
pass
#@-<< add a newline before def or class >>
#@+<< replace tabs with spaces >>
#@+node:ekr.20040331151007.4: *3* << replace tabs with spaces >>
s = ''.join(lines)
fh = open(fname,"w")
fh.write(s.replace("\t",NSPACES))
fh.close()
#@-<< replace tabs with spaces >>
nosentNodes = []
#@-others
#@-leo
|
py | 7df9065a64c8948f9b805c64f0a39369a70ba39f | import datetime
import time
import numpy as np
import statistics as stat
from numpoisson.numpoisson import NumPoissonGeometry
npg = NumPoissonGeometry(3, 'x')
P_sl2 = {(1, 2): '-x3', (1, 3): '-x2', (2, 3): 'x1'}
h = '(x1**2)/2 + (x2**2)/2 - (x3**2)/2'
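# Benchmark sketch: for each precomputed mesh (keyed 10**2 through 10**8) the
# Hamiltonian vector field of h with respect to the sl(2) bivector P_sl2 is
# evaluated 25 times, recording the mean and population standard deviation.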
num_hamiltonian_vf_res = dict()
j = 2
for mesh_path in ['3Qmesh_10_2.npy', '3Qmesh_10_3.npy', '3Qmesh_10_4.npy', '3Qmesh_10_5.npy', '3Qmesh_10_6.npy', '3Qmesh_10_7.npy', '3Qmesh_10_8.npy']:
print(f'step {j}')
tiempos = dict()
with open(mesh_path, 'rb') as f:
mesh = np.load(f)
for k in range(25):
A = datetime.datetime.now()
npg.num_hamiltonian_vf(P_sl2, h, mesh, pt_output=True)
B = datetime.datetime.now()
tiempos[k] = (B - A).total_seconds()
promedio = stat.mean(tiempos.values())
desviacion = stat.pstdev(tiempos.values())
tiempos['promedios'] = promedio
tiempos['desviacion'] = desviacion
num_hamiltonian_vf_res[f'10**{j}'] = tiempos
j = j + 1
print(num_hamiltonian_vf_res)
print('Finish')
|
py | 7df906c5c64f6dfec543a9fe12c05b818b4b0595 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
VERSION_FILE = open('VERSION')
VERSION = VERSION_FILE.read().strip()
def readme():
"""Helper function for long_description"""
with open('README.rst') as readme_file:
return readme_file.read()
setup(
name='string-scanner',
version=VERSION,
url='http://github.com/sanscore/py-string-scanner/',
description='',
long_description=readme(),
keywords='',
author='Grant Welch',
author_email='gwelch925 at gmail.com',
license='Apache License 2.0',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=[
'regex',
],
setup_requires=[
'pytest-runner',
'wheel',
],
tests_require=[
'pytest',
'mock',
],
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Libraries",
],
include_package_data=True,
zip_safe=False,
)
|
py | 7df9079e3f4c0d43f675e3e34d97696b3fc7ffe7 | import io
import os
import sys
from subprocess import PIPE, Popen, STDOUT
import six
from conans.errors import ConanException
from conans.unicode import get_cwd
from conans.util.files import decode_text
from conans.util.runners import pyinstaller_bundle_env_cleaned
class _UnbufferedWrite(object):
def __init__(self, stream):
self._stream = stream
def write(self, *args, **kwargs):
self._stream.write(*args, **kwargs)
self._stream.flush()
class ConanRunner(object):
def __init__(self, print_commands_to_output=False, generate_run_log_file=False,
log_run_to_output=True, output=None):
self._print_commands_to_output = print_commands_to_output
self._generate_run_log_file = generate_run_log_file
self._log_run_to_output = log_run_to_output
self._output = output
def __call__(self, command, output=True, log_filepath=None, cwd=None, subprocess=False):
"""
@param command: Command to execute
@param output: Instead of print to sys.stdout print to that stream. Could be None
@param log_filepath: If specified, also log to a file
@param cwd: Move to directory to execute
"""
if output and isinstance(output, io.StringIO) and six.PY2:
# in py2 writing to a StringIO requires unicode, otherwise it fails
print("*** WARN: Invalid output parameter of type io.StringIO(), "
"use six.StringIO() instead ***")
stream_output = output if output and hasattr(output, "write") else self._output or sys.stdout
if hasattr(output, "flush"):
# We do not want output from different streams to get mixed (sys.stdout, os.system)
stream_output = _UnbufferedWrite(stream_output)
if not self._generate_run_log_file:
log_filepath = None
# Log the command call in output and logger
call_message = "\n----Running------\n> %s\n-----------------\n" % command
if self._print_commands_to_output and stream_output and self._log_run_to_output:
stream_output.write(call_message)
with pyinstaller_bundle_env_cleaned():
# No output has to be redirected to logs or buffer or omitted
if (output is True and not self._output and not log_filepath and self._log_run_to_output
and not subprocess):
return self._simple_os_call(command, cwd)
elif log_filepath:
if stream_output:
stream_output.write("Logging command output to file '%s'\n" % log_filepath)
with open(log_filepath, "a+") as log_handler:
if self._print_commands_to_output:
log_handler.write(call_message)
return self._pipe_os_call(command, stream_output, log_handler, cwd)
else:
return self._pipe_os_call(command, stream_output, None, cwd)
def _pipe_os_call(self, command, stream_output, log_handler, cwd):
try:
# piping both stdout, stderr and then later only reading one will hang the process
            # if the other fills the pipe. So piping stdout, and redirecting stderr to stdout,
# so both are merged and use just a single get_stream_lines() call
proc = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT, cwd=cwd)
except Exception as e:
raise ConanException("Error while executing '%s'\n\t%s" % (command, str(e)))
def get_stream_lines(the_stream):
while True:
line = the_stream.readline()
if not line:
break
decoded_line = decode_text(line)
if stream_output and self._log_run_to_output:
try:
stream_output.write(decoded_line)
except UnicodeEncodeError: # be aggressive on text encoding
decoded_line = decoded_line.encode("latin-1", "ignore").decode("latin-1",
"ignore")
stream_output.write(decoded_line)
if log_handler:
# Write decoded in PY2 causes some ASCII encoding problems
# tried to open the log_handler binary but same result.
log_handler.write(line if six.PY2 else decoded_line)
get_stream_lines(proc.stdout)
# get_stream_lines(proc.stderr)
proc.communicate()
ret = proc.returncode
return ret
def _simple_os_call(self, command, cwd):
if not cwd:
return os.system(command)
else:
try:
old_dir = get_cwd()
os.chdir(cwd)
result = os.system(command)
except Exception as e:
raise ConanException("Error while executing"
" '%s'\n\t%s" % (command, str(e)))
finally:
os.chdir(old_dir)
return result
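# Hypothetical usage sketch (constructor and __call__ signatures as defined above):
#   runner = ConanRunner(print_commands_to_output=True)
#   return_code = runner("cmake --version", output=True)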
|