ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | 7dfe23677c03135dc65be45d6ae89bdaeb6af71a | """All internal ansible-lint rules."""
import copy
import glob
import importlib.util
import logging
import os
import re
from collections import defaultdict
from importlib.abc import Loader
from time import sleep
from typing import Iterator, List, Optional
import ansiblelint.utils
from ansiblelint._internal.rules import (
AnsibleParserErrorRule, BaseRule, LoadingFailureRule, RuntimeErrorRule,
)
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.skip_utils import append_skipped_rules, get_rule_skips_from_line
_logger = logging.getLogger(__name__)
class AnsibleLintRule(BaseRule):
def __repr__(self) -> str:
"""Return a AnsibleLintRule instance representation."""
return self.id + ": " + self.shortdesc
@staticmethod
def unjinja(text):
text = re.sub(r"{{.+?}}", "JINJA_EXPRESSION", text)
text = re.sub(r"{%.+?%}", "JINJA_STATEMENT", text)
text = re.sub(r"{#.+?#}", "JINJA_COMMENT", text)
return text
def create_matcherror(
self,
message: Optional[str] = None,
linenumber: int = 0,
details: str = "",
filename: Optional[str] = None,
tag: str = "") -> MatchError:
match = MatchError(
message=message,
linenumber=linenumber,
details=details,
filename=filename,
rule=copy.copy(self)
)
if tag:
match.tag = tag
return match
def matchlines(self, file, text) -> List[MatchError]:
matches: List[MatchError] = []
if not self.match:
return matches
# arrays are 0-based, line numbers are 1-based
# so use prev_line_no as the counter
for (prev_line_no, line) in enumerate(text.split("\n")):
if line.lstrip().startswith('#'):
continue
rule_id_list = get_rule_skips_from_line(line)
if self.id in rule_id_list:
continue
result = self.match(line)
if not result:
continue
message = None
if isinstance(result, str):
message = result
m = self.create_matcherror(
message=message,
linenumber=prev_line_no + 1,
details=line,
filename=file['path'])
matches.append(m)
return matches
# TODO(ssbarnea): Reduce mccabe complexity
# https://github.com/ansible-community/ansible-lint/issues/744
def matchtasks(self, file: str, text: str) -> List[MatchError]: # noqa: C901
matches: List[MatchError] = []
if not self.matchtask:
return matches
if file['type'] == 'meta':
return matches
yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path'])
if not yaml:
return matches
yaml = append_skipped_rules(yaml, text, file['type'])
try:
tasks = ansiblelint.utils.get_normalized_tasks(yaml, file)
except MatchError as e:
return [e]
for task in tasks:
if self.id in task.get('skipped_rules', ()):
continue
if 'action' not in task:
continue
result = self.matchtask(file, task)
if not result:
continue
message = None
if isinstance(result, str):
message = result
task_msg = "Task/Handler: " + ansiblelint.utils.task_to_str(task)
m = self.create_matcherror(
message=message,
linenumber=task[ansiblelint.utils.LINE_NUMBER_KEY],
details=task_msg,
filename=file['path'])
matches.append(m)
return matches
@staticmethod
def _matchplay_linenumber(play, optional_linenumber):
try:
linenumber, = optional_linenumber
except ValueError:
linenumber = play[ansiblelint.utils.LINE_NUMBER_KEY]
return linenumber
def matchyaml(self, file: Lintable) -> List[MatchError]:
matches: List[MatchError] = []
if not self.matchplay:
return matches
yaml = ansiblelint.utils.parse_yaml_linenumbers(file.content, file.path)
# yaml returned can be an AnsibleUnicode (a string) when the yaml
# file contains a single string. YAML spec allows this but we consider
        # this a fatal error.
if isinstance(yaml, str):
return [MatchError(
filename=file.path,
rule=LoadingFailureRule()
)]
if not yaml:
return matches
if isinstance(yaml, dict):
yaml = [yaml]
        yaml = append_skipped_rules(yaml, file.content, file.kind)
for play in yaml:
# Bug #849
if play is None:
continue
if self.id in play.get('skipped_rules', ()):
continue
result = self.matchplay(file, play)
if not result:
continue
if isinstance(result, tuple):
result = [result]
if not isinstance(result, list):
raise TypeError("{} is not a list".format(result))
for section, message, *optional_linenumber in result:
linenumber = self._matchplay_linenumber(play, optional_linenumber)
matches.append(self.create_matcherror(
message=message,
linenumber=linenumber,
details=str(section),
filename=file.path
))
return matches
def load_plugins(directory: str) -> List[AnsibleLintRule]:
"""Return a list of rule classes."""
result = []
for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
pluginname = os.path.basename(pluginfile.replace('.py', ''))
spec = importlib.util.spec_from_file_location(pluginname, pluginfile)
# https://github.com/python/typeshed/issues/2793
if spec and isinstance(spec.loader, Loader):
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
obj = getattr(module, pluginname)()
result.append(obj)
return result
class RulesCollection(object):
def __init__(self, rulesdirs=None) -> None:
"""Initialize a RulesCollection instance."""
if rulesdirs is None:
rulesdirs = []
self.rulesdirs = ansiblelint.file_utils.expand_paths_vars(rulesdirs)
self.rules: List[BaseRule] = []
# internal rules included in order to expose them for docs as they are
# not directly loaded by our rule loader.
self.rules.extend(
[RuntimeErrorRule(), AnsibleParserErrorRule(), LoadingFailureRule()])
for rulesdir in self.rulesdirs:
_logger.debug("Loading rules from %s", rulesdir)
self.extend(load_plugins(rulesdir))
self.rules = sorted(self.rules)
def register(self, obj: AnsibleLintRule) -> None:
self.rules.append(obj)
def __iter__(self) -> Iterator[BaseRule]:
"""Return the iterator over the rules in the RulesCollection."""
return iter(self.rules)
def __len__(self):
"""Return the length of the RulesCollection data."""
return len(self.rules)
def extend(self, more: List[AnsibleLintRule]) -> None:
self.rules.extend(more)
def run(self, playbookfile, tags=set(), skip_list=frozenset()) -> List:
text = ""
matches: List = list()
error: Optional[IOError] = None
for i in range(3):
try:
with open(playbookfile['path'], mode='r', encoding='utf-8') as f:
text = f.read()
break
except IOError as e:
_logger.warning(
"Couldn't open %s - %s [try:%s]",
playbookfile['path'],
e.strerror,
i)
error = e
sleep(1)
continue
else:
return [MatchError(
message=str(error),
filename=playbookfile['path'],
rule=LoadingFailureRule())]
for rule in self.rules:
if not tags or not set(rule.tags).union([rule.id]).isdisjoint(tags):
rule_definition = set(rule.tags)
rule_definition.add(rule.id)
if set(rule_definition).isdisjoint(skip_list):
matches.extend(rule.matchlines(playbookfile, text))
matches.extend(rule.matchtasks(playbookfile, text))
matches.extend(rule.matchyaml(
Lintable(
playbookfile['path'],
content=text,
kind=playbookfile['type'])))
# some rules can produce matches with tags that are inside our
# skip_list, so we need to cleanse the matches
matches = [m for m in matches if m.tag not in skip_list]
return matches
def __repr__(self) -> str:
"""Return a RulesCollection instance representation."""
return "\n".join([rule.verbose()
for rule in sorted(self.rules, key=lambda x: x.id)])
def listtags(self) -> str:
tag_desc = {
"behaviour": "Indicates a bad practice or behavior",
"bug": "Likely wrong usage pattern",
"command-shell": "Specific to use of command and shell modules",
"core": "Related to internal implementation of the linter",
"deprecations": "Indicate use of features that are removed from Ansible",
"experimental": "Newly introduced rules, by default triggering only warnings",
"formatting": "Related to code-style",
"idempotency":
"Possible indication that consequent runs would produce different results",
"idiom": "Anti-pattern detected, likely to cause undesired behavior",
"metadata": "Invalid metadata, likely related to galaxy, collections or roles",
"module": "Incorrect module usage",
"readability": "Reduce code readability",
"repeatability": "Action that may produce different result between runs",
"resources": "Unoptimal feature use",
"safety": "Increase security risk",
"task": "Rules specific to tasks",
"unpredictability": "This will produce unexpected behavior when run",
}
tags = defaultdict(list)
for rule in self.rules:
for tag in rule.tags:
tags[tag].append(rule.id)
result = "# List of tags and how they are used\n"
for tag in sorted(tags):
desc = tag_desc.get(tag, None)
if desc:
result += f"{tag}: # {desc}\n"
else:
result += f"{tag}:\n"
result += f" rules: [{', '.join(tags[tag])}]\n"
return result
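# Illustrative usage sketch (not part of this module; the rules directory path
# and the playbook dict layout with 'path'/'type' keys are assumptions based on
# the structures used above):
#
#   collection = RulesCollection(rulesdirs=["./rules"])
#   matches = collection.run({'path': 'site.yml', 'type': 'playbook'})
#   for match in matches:
#       print(match)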
|
py | 7dfe250d1ddf74a5f872e280fbb5c078ff3c4481 | #!/usr/bin/env python3
import eyed3
import os
import re
import requests
import signal
import subprocess
import sqlite3
import sys
import time
from datetime import datetime
from difflib import SequenceMatcher
from os.path import exists
from pprint import pprint
updated = 0
lidar_db = os.environ.get(
'LIDARR_DB',
"/home/dave/src/docker-media-center/config/lidarr/lidarr.db")
def get_lidarr_track_ids(cur, artist_name, album_name, track_name):
    # look up the ids of Tracks rows matching this artist/album/track
sql = """
select Tracks.Id from ArtistMetadata, Artists, Tracks, Albums, AlbumReleases
where ArtistMetadata.id = Artists.ArtistMetadataId
and Artists.ArtistMetadataId = Tracks.ArtistMetadataId
and Tracks.AlbumReleaseId = AlbumReleases.Id
and Albums.id = AlbumReleases.AlbumId
and ArtistMetadata.Name = ?
and AlbumReleases.Title = ?
and Tracks.Title = ?
"""
cur.execute(sql, (artist_name, album_name, track_name,))
result = cur.fetchall()
if len(result) == 0:
return -1
return [X[0] for X in result]
def get_lidarr_album_id(cur, artist_name, album_name, track_name):
    # look up the Albums row id matching this artist/album/track
sql = """
select Albums.Id from ArtistMetadata, Artists, Tracks, Albums, AlbumReleases
where ArtistMetadata.id = Artists.ArtistMetadataId
and Artists.ArtistMetadataId = Tracks.ArtistMetadataId
and Tracks.AlbumReleaseId = AlbumReleases.Id
and Albums.id = AlbumReleases.AlbumId
and ArtistMetadata.Name = ?
and AlbumReleases.Title = ?
and Tracks.Title = ?
limit 1
"""
cur.execute(sql, (artist_name, album_name, track_name,))
result = cur.fetchall()
if len(result) == 0:
return -1
return result[0][0]
def set_lidarr_track_trackfield(con, cur, TrackFileId, track_id):
# update
cur.execute("UPDATE Tracks SET TrackFileId=? WHERE id = ?",
(TrackFileId, track_id,))
con.commit()
def set_lidarr_trackfile_album_id(con, cur, AlbumId, Id):
# update
cur.execute("UPDATE TrackFiles SET AlbumId=? WHERE id=?",
(AlbumId, Id,))
con.commit()
def lidarr_match_fieldtrack_id(con, cur, id, path):
global updated
file = path.split("/", 4)[-1]
parts = file.split("-", 3)
artist = parts[0].strip()
album = parts[1].strip()
track = parts[2].strip().replace(".mp3", "")
track_ids = get_lidarr_track_ids(cur, artist, album, track)
print(artist)
print(album)
print(track)
print(track_ids)
if track_ids == -1:
return
for x in track_ids:
set_lidarr_track_trackfield(con, cur, id, x)
updated += 1
def lidarr_match_album_id(con, cur, id, path):
global updated
file = path.split("/", 4)[-1]
parts = file.split("-", 3)
artist = ""
album = ""
track = ""
if len(parts) == 3:
artist = parts[0].strip()
album = parts[1].strip()
track = parts[2].strip()
elif len(parts) == 4:
artist = parts[0].strip()
album = parts[1].strip()
track = parts[3].strip()
track = track.replace(".mp3", "").replace(".flac", "")
album_id = get_lidarr_album_id(cur, artist, album, track)
print(artist)
print(album)
print(track)
print(album_id)
if album_id == -1:
return
set_lidarr_trackfile_album_id(con, cur, album_id, id)
updated += 1
def iterate_unmapped():
global updated
con = sqlite3.connect(lidar_db)
cur = con.cursor()
con.set_trace_callback(print)
updated = 0
cur.execute("SELECT * FROM TrackFiles WHERE id NOT IN (SELECT TrackFileId FROM Tracks)")
result = cur.fetchall()
if len(result) == 0:
con.close()
return
for row in result:
lidarr_match_fieldtrack_id(con, cur, row[0], row[9])
print("Total : " + str(len(result)))
print("Updated : " + str(updated))
updated = 0
cur.execute("SELECT * FROM TrackFiles")
result = cur.fetchall()
if len(result) == 0:
con.close()
return
for row in result:
lidarr_match_album_id(con, cur, row[0], row[9])
con.close()
print("Total : " + str(len(result)))
print("Updated : " + str(updated))
if __name__ == "__main__":
    iterate_unmapped()
|
py | 7dfe25d071eb6361271c49b416fe8e610a1aa603 | from latextree.parser.parser import Parser
import pytest
test_strings = [
r'\newtheorem{lemma}{Lemma}[theorem]',
r'\newenvironment{myenv}{pre}{post}',
r'\newcommand{\strong}[1]{\textbf{#1}}',
r'\newcommand{\nice}[1]{\textit{#1}}',
r'\renewcommand{\emph}[1]{\textbf{#1}}',
r'\def\hello{shw mae}',
r'\def\strong[1]{\textbf{#1}}',
r'\newenvironment{myenv}{}{} \begin{myenv}inside\end{myenv}',
r'\newenvironment{myenv}[1]{\textbf{start #1}}{\textbf{finish}} \begin{myenv}{twat}inside\end{myenv}',
r'\newenvironment{myenv}[2]{\textbf{start #1--#2}}{\par\textbf{finish}}',
r'\newtheorem{theo}{Theorem}\begin{theo} inside \end{theo}',
r'\newtheorem{theo}{Theorem}[section]\begin{theo} inside \end{theo}',
r'\newtheorem{lem}[theo]{Lemma}\begin{theo} inside \end{theo}\begin{lem} hello \end{lem}',
r'\newcommand{\hello}[1]{Hi #1} \hello{Bob}',
r'\newcommand{\hello}[1]{Hi #1} \hello{Bob $\alpha=\beta$}',
r'\newcommand{\isa}[2]{#1 is a #2} \isa{Bingo}{dog}',
]
@pytest.mark.parametrize("test_input", test_strings)
def test_parser(test_input):
p = Parser()
root = p.parse(test_input)
assert test_input == root.chars()
|
py | 7dfe25d38c16bfdae5935561b41b55bc41a884ec | import numpy as np
import itertools
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
from random import randint
class Scenario(BaseScenario):
def __init__(self):
self.one_hot_array = []
self.colours = []
self.obstacle_count = 0
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 4
num_landmarks = 4
num_obstacles = 12
# generate one-hot encoding for unique hidden goals
self.one_hot_array = list(itertools.product([0, 1], repeat=num_landmarks))
# generate colours for goal identification
for _ in range(num_landmarks):
self.colours.append(np.random.uniform(-1, +1, 3))
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.10
agent.color = self.colours[i]
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.color = self.colours[i]
landmark.id = self.one_hot_array[2**i]
# add obstacles
world.obstacles = [Landmark() for i in range(num_obstacles)]
for i, obstacle in enumerate(world.obstacles):
obstacle.name = 'obstacle %d' % i
obstacle.collide = True
obstacle.movable = False
obstacle.size = 0.40
obstacle.boundary = False
obstacle.color = np.array([0.25, 0.25, 0.25])
# self.create_wall(world, obstacle, 10, -0.2, -1, -0.2, -0.2)
# make initial conditions
self.reset_world(world)
return world
def assign_goals(self, i, agent):
# assign each agent to a unique set of goals in one-hot encoding
agent.hidden_goals = self.one_hot_array[2**i]
def reset_world(self, world):
# properties for agents
for i, agent in enumerate(world.agents):
pass
# properties for landmarks
for i, agent in enumerate(world.agents):
pass
# properties for obstacles
for i, obstacle in enumerate(world.obstacles):
pass
# set initial states
starts = [[0.00, -0.70], [0.00, 0.70], [-0.70, 0.00], [0.70, 0.00]]
for i, agent in enumerate(world.agents):
r = randint(0,len(starts)-1)
agent.state.p_pos = np.array(starts[r])
starts.remove(starts[r])
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
self.assign_goals(i, agent)
starts = [[0.00, -0.70], [0.00, 0.70], [-0.70, 0.00], [0.70, 0.00]]
for i, landmark in enumerate(world.landmarks):
r = randint(0,len(starts)-1)
landmark.state.p_pos = np.array(starts[r])
starts.remove(starts[r])
landmark.state.p_vel = np.zeros(world.dim_p)
for i, obstacle in enumerate(world.obstacles):
if i > 3:
obstacle.size = 0.2
if i > 7:
obstacle.size = 0.1
positions = [[-0.50, -0.50], [-0.50, 0.50], [0.50, -0.50], [0.50, 0.50],
[-0.30, -0.30], [-0.30, 0.30], [0.30, -0.30], [0.30, 0.30],
[-0.20, -0.20], [-0.20, 0.20], [0.20, -0.20], [0.20, 0.20]]
obstacle.state.p_pos = np.array(positions[i])
obstacle.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
        dists = []
        for l in world.landmarks:
            if l.id == agent.hidden_goals:
                dist = np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos)))
                dists.append(dist)
                rew -= dist
                # count the goal landmark as occupied when the agent is within 0.1
                if dist < 0.1:
                    occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 7
collisions += 1
for o in world.obstacles:
if self.is_collision(o, agent):
rew -= 0
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
        return dist < dist_min
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each relevant landmark, penalized for collisions
rew = 0
dists = []
for l in world.landmarks:
if l.id == agent.hidden_goals:
rew -= np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos)))
if self.is_collision(l, agent):
rew += 0
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 7
for o in world.obstacles:
if self.is_collision(o, agent):
rew -= 0
# agents are penalized for exiting the screen, so that they can converge faster
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(agent.state.p_pos[p])
rew -= bound(x)
return rew
def observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
for entity in world.obstacles: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# entity colors
entity_color = []
for entity in world.landmarks: # world.entities:
entity_color.append(entity.color)
for entity in world.obstacles: # world.entities:
entity_color.append(entity.color)
# communication of all other agents
comm = []
other_pos = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
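        # With the defaults above (4 agents, 4 landmarks, 12 obstacles, dim_p=2,
        # dim_c=2) this yields a 48-dimensional vector: 2 vel + 2 pos +
        # 32 landmark/obstacle offsets + 6 other-agent offsets + 6 comm values.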
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm)
|
py | 7dfe269e895b830a5d788e0b79985ccbd511d45e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DomainTopicsOperations(object):
"""DomainTopicsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Constant value: "2019-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-06-01"
self.config = config
def get(
self, resource_group_name, domain_name, domain_topic_name, custom_headers=None, raw=False, **operation_config):
"""Get a domain topic.
Get properties of a domain topic.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain_topic_name: Name of the topic.
:type domain_topic_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DomainTopic or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.eventgrid.models.DomainTopic or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'domainTopicName': self._serialize.url("domain_topic_name", domain_topic_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DomainTopic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{domainTopicName}'}
def _create_or_update_initial(
self, resource_group_name, domain_name, domain_topic_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'domainTopicName': self._serialize.url("domain_topic_name", domain_topic_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('DomainTopic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, domain_name, domain_topic_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or update a domain topic.
Asynchronously creates or updates a new domain topic with the specified
parameters.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain_topic_name: Name of the domain topic.
:type domain_topic_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns DomainTopic or
ClientRawResponse<DomainTopic> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.eventgrid.models.DomainTopic]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.eventgrid.models.DomainTopic]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain_topic_name=domain_topic_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('DomainTopic', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{domainTopicName}'}
def _delete_initial(
self, resource_group_name, domain_name, domain_topic_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str'),
'domainTopicName': self._serialize.url("domain_topic_name", domain_topic_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, domain_name, domain_topic_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Delete a domain topic.
Delete existing domain topic.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Name of the domain.
:type domain_name: str
:param domain_topic_name: Name of the domain topic.
:type domain_topic_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
domain_name=domain_name,
domain_topic_name=domain_topic_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics/{domainTopicName}'}
def list_by_domain(
self, resource_group_name, domain_name, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
"""List domain topics.
List all the topics in a domain.
:param resource_group_name: The name of the resource group within the
user's subscription.
:type resource_group_name: str
:param domain_name: Domain name.
:type domain_name: str
:param filter: The query used to filter the search results using OData
syntax. Filtering is permitted on the 'name' property only and with
limited number of OData operations. These operations are: the
'contains' function as well as the following logical operations: not,
and, or, eq (for equal), and ne (for not equal). No arithmetic
operations are supported. The following is a valid filter example:
$filter=contains(namE, 'PATTERN') and name ne 'PATTERN-1'. The
following is not a valid filter example: $filter=location eq 'westus'.
:type filter: str
:param top: The number of results to return per page for the list
operation. Valid range for top parameter is 1 to 100. If not
specified, the default number of results to be returned is 20 items
per page.
:type top: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of DomainTopic
:rtype:
~azure.mgmt.eventgrid.models.DomainTopicPaged[~azure.mgmt.eventgrid.models.DomainTopic]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_domain.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'domainName': self._serialize.url("domain_name", domain_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DomainTopicPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DomainTopicPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_domain.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventGrid/domains/{domainName}/topics'}
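# Illustrative usage sketch (not generated code; the management client import
# path and the credential object are assumptions):
#
#   from azure.mgmt.eventgrid import EventGridManagementClient
#   client = EventGridManagementClient(credentials, subscription_id)
#   topic = client.domain_topics.get('my-rg', 'my-domain', 'my-topic')
#   poller = client.domain_topics.create_or_update('my-rg', 'my-domain', 'new-topic')
#   topic = poller.result()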
|
py | 7dfe271ac6fcfcee1c2dcb0976e01a5d955e487f | # MIT License
#
# Copyright (c) 2022 by exersalza
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
API_VERSION = 'v1'  # Please leave the 'v' at the beginning, otherwise it will not work
def default_header(jwt):
return {'Authorization': f'Bearer {jwt}',
'Content-Type': 'application/json',
'Accept': 'application/json'}
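# Illustrative usage (the endpoint URL and the `jwt_token` value are placeholders):
#
#   import requests
#   resp = requests.get(f"https://example.invalid/api/{API_VERSION}/status",
#                       headers=default_header(jwt_token))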
|
py | 7dfe2885037494e14abc21b8eb06ba3a4e4fc73f | """
ASGI config for hello project.
It exposes the ASGI callable as a module-level variable named `application`.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello.settings')
application = get_asgi_application()
|
py | 7dfe28ce643b7b9dfdc6deb719836b6a27612627 | from .losses import (weighted_nll_loss, weighted_cross_entropy,
weighted_binary_cross_entropy, sigmoid_focal_loss,
py_sigmoid_focal_loss, weighted_sigmoid_focal_loss,
mask_cross_entropy, smooth_l1_loss, weighted_smoothl1,
balanced_l1_loss, weighted_balanced_l1_loss, iou_loss,
bounded_iou_loss, weighted_iou_loss, accuracy
####################################################
# BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss
)
__all__ = [
'weighted_nll_loss', 'weighted_cross_entropy',
'weighted_binary_cross_entropy', 'sigmoid_focal_loss',
'py_sigmoid_focal_loss', 'weighted_sigmoid_focal_loss',
'mask_cross_entropy', 'smooth_l1_loss', 'weighted_smoothl1',
'balanced_l1_loss', 'weighted_balanced_l1_loss', 'bounded_iou_loss',
'weighted_iou_loss', 'iou_loss', 'accuracy'
#############################################
# 'BoundedIoULoss', 'CIoULoss', 'DIoULoss', 'GIoULoss', 'IoULoss'
]
|
py | 7dfe290b44a9fe9dacb47e9bd0311bc0d571996d | import json
import os
import socket
import time
from multiprocessing import Process
import morepath
import morpfw
import morpfw.tests
import requests
import transaction
import webob
import yaml
from more.basicauth import BasicAuthIdentityPolicy
from more.jwtauth import JWTIdentityPolicy
from morpfw.authn.pas.exc import UserExistsError
from morpfw.cli import cli
from morpfw.main import create_admin as morpfw_create_admin
from morpfw.main import create_app
from morpfw.request import request_factory
from requests_oauthlib import OAuth2Session
from requests_oauthlib.oauth2_session import (
InsecureTransportError,
TokenExpiredError,
TokenUpdated,
is_secure_transport,
log,
)
from webtest import TestApp as Client
def make_request(appobj):
request = appobj.request_class(
environ={
"PATH_INFO": "/",
"wsgi.url_scheme": "http",
"SERVER_NAME": "localhost",
"SERVER_PORT": "80",
"SERVER_PROTOCOL": "HTTP/1.1",
},
app=appobj,
)
return request
def get_client(config="settings.yml", **kwargs):
param = cli.load(config)
morepath.scan(morpfw)
request = request_factory(param["settings"], app_factory_opts=kwargs, scan=False)
c = Client(request.environ["morpfw.wsgi.app"])
c.mfw_request = request
return c
def create_admin(client: Client, user: str, password: str, email: str):
appobj = client.app
morpfw_create_admin(client, user, password, email)
transaction.commit()
def start_scheduler(app):
settings = app._raw_settings
hostname = socket.gethostname()
ss = settings["configuration"]["morpfw.celery"]
sched = app.celery.Beat(hostname="testscheduler.%s" % hostname, **ss)
proc = Process(target=sched.run)
proc.daemon = True
proc.start()
time.sleep(2)
return proc
def start_worker(app):
settings = app._raw_settings
hostname = socket.gethostname()
ss = settings["configuration"]["morpfw.celery"]
worker = app.celery.Worker(hostname="testworker.%s" % hostname, **ss)
proc = Process(target=worker.start)
proc.daemon = True
proc.start()
time.sleep(2)
return proc
class WebTestResponse(object):
def __init__(self, response: webob.Response):
self.wt_response = response
@property
def status_code(self):
return self.wt_response.status_code
@property
def request(self):
return self.wt_response.request
@property
def headers(self):
return self.wt_response.headers
@property
def text(self):
return self.wt_response.text
class FakeRequest(object):
def __init__(self):
self.headers = {}
class WebTestOAuth2Session(OAuth2Session):
def __init__(self, wt_client, **kwargs):
self.wt_client = wt_client
super().__init__(**kwargs)
def request(
self,
method,
url,
data=None,
headers=None,
auth=None,
withhold_token=None,
client_id=None,
client_secret=None,
**kwargs
):
"""Intercept all requests and add the OAuth 2 token if present."""
if not is_secure_transport(url):
raise InsecureTransportError()
if self.token and not withhold_token:
log.debug(
"Invoking %d protected resource request hooks.",
len(self.compliance_hook["protected_request"]),
)
for hook in self.compliance_hook["protected_request"]:
log.debug("Invoking hook %s.", hook)
url, headers, data = hook(url, headers, data)
log.debug("Adding token %s to request.", self.token)
try:
url, headers, data = self._client.add_token(
url, http_method=method, body=data, headers=headers
)
# Attempt to retrieve and save new access token if expired
except TokenExpiredError:
if self.auto_refresh_url:
log.debug(
"Auto refresh is set, attempting to refresh at %s.",
self.auto_refresh_url,
)
# We mustn't pass auth twice.
auth = kwargs.pop("auth", None)
if client_id and client_secret and (auth is None):
log.debug(
'Encoding client_id "%s" with client_secret as Basic auth credentials.',
client_id,
)
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
token = self.refresh_token(
self.auto_refresh_url, auth=auth, **kwargs
)
if self.token_updater:
log.debug(
"Updating token to %s using %s.", token, self.token_updater
)
self.token_updater(token)
url, headers, data = self._client.add_token(
url, http_method=method, body=data, headers=headers
)
else:
raise TokenUpdated(token)
else:
raise
log.debug("Requesting url %s using method %s.", url, method)
log.debug("Supplying headers %s and data %s", headers, data)
log.debug("Passing through key word arguments %s.", kwargs)
if "json" in kwargs:
f = getattr(self.wt_client, "%s_json" % method.lower())
data = kwargs["json"]
else:
f = getattr(self.wt_client, method.lower())
if auth:
fr = FakeRequest()
auth(fr)
headers.update(fr.headers)
resp = f(url, data, headers=headers)
return WebTestResponse(resp)
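# Illustrative sketch (client id and token values are placeholders): wrap a
# WebTest client so that requests issued through requests-oauthlib hit the
# in-process WSGI app instead of the network.
#
#   client = get_client("settings.yml")
#   session = WebTestOAuth2Session(
#       client, client_id="test-client",
#       token={"access_token": "abc", "token_type": "Bearer"})
#   resp = session.get("https://localhost/api/v1/resource")
#   assert resp.status_code == 200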
|
py | 7dfe29b3d13f0380e315a227fe1d6a8dbc65099f | from codes.a_config._rl_parameters.off_policy.parameter_td3 import PARAMETERS_TD3, TD3ActionType, TD3ActionSelectorType
from codes.e_utils.names import *
from codes.a_config.parameters_general import PARAMETERS_GENERAL
class PARAMETERS_SYNCRONIZE_TD3(PARAMETERS_GENERAL, PARAMETERS_TD3):
ENVIRONMENT_ID = EnvironmentName.SYNCRONIZE_V0
RL_ALGORITHM = RLAlgorithmName.TD3_V0
DEEP_LEARNING_MODEL = DeepLearningModelName.TD3_MLP
MAX_EPISODE_STEP_AT_PLAY = 10000000000
MAX_EPISODE_STEP = 1000
MAX_GLOBAL_STEP = 10000000
LEARNING_RATE = 0.001
ACTOR_LEARNING_RATE = 0.0002
GAMMA = 0.99
BATCH_SIZE = 128
AVG_EPISODE_SIZE_FOR_STAT = 50
N_STEP = 4
OMEGA = False
PER_PROPORTIONAL = False
PER_RANK_BASED = False
TEST_NUM_EPISODES = 3
DISTRIBUTIONAL = False
NOISY_NET = False
ACT_NOISE = 1
TRAIN_ACTION_NOISE_CLIP = 1
NOISE_ENABLED = True
OU_SIGMA = 2.5
TRAIN_ONLY_AFTER_EPISODE = False
NUM_TRAIN_ONLY_AFTER_EPISODE = 100
TAU = 0.0005
ACTION_SCALE = 250.0
EPSILON_INIT = 1.0
EPSILON_MIN = 0.01
EPSILON_MIN_STEP = 1000000
TYPE_OF_TD3_ACTION = TD3ActionType.GAUSSIAN_NOISE_WITH_EPSILON
TYPE_OF_TD3_ACTION_SELECTOR = TD3ActionSelectorType.SOMETIMES_BLOW_ACTION_SELECTOR
TRAIN_STEP_FREQ = 2
POLICY_UPDATE_FREQUENCY = 2 * TRAIN_STEP_FREQ
UNIT_TIME = 0.006
VERBOSE_TO_LOG = False
|
py | 7dfe29b544cef59dc4a88459114ce6a25dc4788d | n=[2,3,4,5,6] # Atomic valences
nc=[2,3,4,5,6] # Atomic valences for ctqmc
l=2 # Orbital angular momentum of the shell
J=1.0 # Slater integrals F2=J*11.9219653179191 from the atomic physics program (2009 Nov 19, J=0.3 originally, changed by chuckyee)
cx=0.0 # spin-orbit coupling from the atomic physics program
qOCA=1 # OCA diagrams are computed in addition to NCA diagrams
Eoca=1. # If the atomic energy of any state involved in the diagram is more than Eoca above the ground state, the diagram is neglected
mOCA=1e-3 # If the matrix element of an OCA diagram is smaller than this, it is neglected
Ncentral=[4] # OCA diagrams are selected such that central occupancy is in Ncentral
|
py | 7dfe2a0e9146559c20b589e8c39cf1977388afd8 | #Hello every one!! I am Pratyush and below is a code to find the maximum sub-array sum of a given array using Kadane's algorithm.
''' A sub-array is a contiguous part of an array, and the optimal approach (time complexity O(n)) to find the maximum sub-array sum is Kadane's algorithm.
We traverse the array while keeping a running sum in curr_sum, resetting it to zero whenever it becomes negative. The maximum value this running sum ever reaches is kept in max_sum, which is our answer.'''
#algorithm :
def maximum_subarray_Sum(arr,n):
curr_sum , max_sum = 0, 0
for i in range(0, n):
curr_sum += arr[i]
if curr_sum < 0:
curr_sum = 0
max_sum = max(curr_sum, max_sum)
return max_sum
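# Worked trace for arr = [-1, 4, 4, -6, 7, -4] (the example at the bottom):
#   curr_sum: 0, 4, 8, 2, 9, 5   max_sum: 0, 4, 8, 8, 9, 9   -> returns 9
# Note: with this formulation an all-negative array returns 0, i.e. the empty
# sub-array is treated as allowed.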
# test examples:
arr=[]
n = int(input("Enter number of elements : "))
print("\n Now enter the numbers one by one(press enter after inserting each element) : ")
for i in range(0, n):
x = int(input())
arr.append(x)
print("\n Maximum Sub Array Sum Is" , maximum_subarray_Sum(arr,n))
#example: if n=6 and arr=[-1, 4 , 4 , -6 , 7 , -4] then maximum sub-array sum is 9 and the sub array for this sum is [4 , 4 , -6 , 7 ].
|
py | 7dfe2aefcd860b4330a231a2255b0a7e3eb09556 | #!/usr/bin/env python
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'fake-key'
SITE_ID = 1
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.contenttypes',
'jsonfield_schema',
'tests'
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': '',
'USER': 'postgres',
'PASSWORD': 'postgres',
        'HOST': 'localhost',
'PORT': '5432',
'TEST': {
'CHARSET': 'UTF8',
'NAME': 'json_test'
}
}
}
|
py | 7dfe2c847e6d48fb4d15c6e9175688406a40b7cd | #!/usr/bin/env python
r"""Computes the partition map for a segmentation.
For every labeled voxel of the input volume, computes the fraction of identically
labeled voxels within a neighborhood of radius `lom_radius`, and then quantizes
that number according to `thresholds`.
Sample invocation:
python compute_partitions.py \
--input_volume third_party/neuroproof_examples/training_sample2/groundtruth.h5:stack \
--output_volume af.h5:af \
--thresholds 0.025,0.05,0.075,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9 \
--lom_radius 16,16,16 \
--min_size 10000
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
from ffn.inference import segmentation
from ffn.inference import storage
from ffn.utils import bounding_box
import h5py
import numpy as np
from scipy.ndimage import filters
from mpi4py import MPI
from time import time
FLAGS = flags.FLAGS
flags.DEFINE_string('input_volume', None,
'Segmentation volume as <volume_path>:<dataset>, where'
'volume_path points to a HDF5 volume.')
flags.DEFINE_string('output_volume', None,
'Volume in which to save the partition map, as '
'<volume_path>:<dataset>.')
flags.DEFINE_list('thresholds', None,
'List of activation voxel fractions used for partitioning.')
flags.DEFINE_list('lom_radius', None,
'Local Object Mask (LOM) radii as (x, y, z).')
flags.DEFINE_list('id_whitelist', None,
'Whitelist of object IDs for which to compute the partition '
'numbers.')
flags.DEFINE_list('exclusion_regions', None,
'List of (x, y, z, r) tuples specifying spherical regions to '
'mark as excluded (i.e. set the output value to 255).')
flags.DEFINE_string('mask_configs', None,
'MaskConfigs proto in text format. Any locations where at '
'least one voxel of the LOM is masked will be marked as '
'excluded.')
flags.DEFINE_integer('min_size', 10000,
'Minimum number of voxels for a segment to be considered for '
'partitioning.')
_comm = MPI.COMM_WORLD
_size = _comm.Get_size()
_rank = _comm.Get_rank()
def _summed_volume_table(val):
"""Computes a summed volume table of 'val'."""
val = val.astype(np.int32)
svt = val.cumsum(axis=0).cumsum(axis=1).cumsum(axis=2)
return np.pad(svt, [[1, 0], [1, 0], [1, 0]], mode='constant')
def _query_summed_volume(svt, diam):
"""Queries a summed volume table.
Operates in 'VALID' mode, i.e. only computes the sums for voxels where the
full diam // 2 context is available.
Args:
svt: summed volume table (see _summed_volume_table)
diam: diameter (z, y, x tuple) of the area within which to compute sums
Returns:
sum of all values within a diam // 2 radius (under L1 metric) of every voxel
in the array from which 'svt' was built.
"""
return (
svt[diam[0]:, diam[1]:, diam[2]:] - svt[diam[0]:, diam[1]:, :-diam[2]] -
svt[diam[0]:, :-diam[1], diam[2]:] - svt[:-diam[0], diam[1]:, diam[2]:] +
svt[:-diam[0], :-diam[1], diam[2]:] + svt[:-diam[0], diam[1]:, :-diam[2]]
+ svt[diam[0]:, :-diam[1], :-diam[2]] -
svt[:-diam[0], :-diam[1], :-diam[2]])
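# For example (illustrative check): with val = np.ones((3, 3, 3)) and
# diam = (3, 3, 3), _query_summed_volume(_summed_volume_table(val), diam)
# is a 1x1x1 array containing 27 -- the sum over the full 3x3x3 neighbourhood
# of the single voxel that has complete context.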
def load_mask(mask_configs, box, lom_diam_zyx):
if mask_configs is None:
return None
mask = storage.build_mask(mask_configs.masks, box.start[::-1],
box.size[::-1])
svt = _summed_volume_table(mask)
mask = _query_summed_volume(svt, lom_diam_zyx) >= 1
return mask
def get_slice(n,p,r):
"""
Distribute n consecutive things (rows of a matrix , elements of a 1D array)
as evenly as possible over p processors and return the slice for rank r.
Uneven workload (differs by 1 at most) is on the initial ranks.
Parameters
----------
n: int, Total number of things to be distributed.
p: int, Total number of processes
r: int, ID of the process (i.e. MPI rank)
Returns
----------
python slice object
"""
rstart = 0
rend = n
if p >= n:
if r < n:
rstart = r
rend = r + 1
else:
rstart = 0
rend = 0
else:
d = n // p
remainder = n % p
rstart = d * r
rend = d * (r+1)
if remainder:
if r >= remainder:
rstart += remainder
rend += remainder
else:
rstart += r
rend += r + 1
return slice(rstart, rend)
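# For example, get_slice(10, 3, 0) == slice(0, 4), get_slice(10, 3, 1) == slice(4, 7)
# and get_slice(10, 3, 2) == slice(7, 10): the one-element remainder lands on rank 0.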
def compute_partitions(seg_array,
thresholds,
lom_radius,
id_whitelist=None,
exclusion_regions=None,
mask_configs=None,
min_size=10000):
"""Computes quantized fractions of active voxels in a local object mask.
Args:
thresholds: list of activation voxel fractions to use for partitioning.
lom_radius: LOM radii as [x, y, z]
id_whitelist: (optional) whitelist of object IDs for which to compute the
partition numbers
exclusion_regions: (optional) list of x, y, z, r tuples specifying regions
to mark as excluded (with 255). The regions are spherical, with
      (x, y, z) defining the center of the sphere and 'r' specifying its
radius. All values are in voxels.
mask_configs: (optional) MaskConfigs proto; any locations where at least
one voxel of the LOM is masked will be marked as excluded (255).
Returns:
tuple of:
corner of output subvolume as (x, y, z)
uint8 ndarray of active fraction voxels
"""
seg_array = segmentation.clear_dust(seg_array, min_size=min_size)
assert seg_array.ndim == 3
lom_radius = np.array(lom_radius)
lom_radius_zyx = lom_radius[::-1]
lom_diam_zyx = 2 * lom_radius_zyx + 1
def _sel(i):
if i == 0:
return slice(None)
else:
return slice(i, -i)
  valid_sel = tuple(_sel(x) for x in lom_radius_zyx)
output = np.zeros(seg_array[valid_sel].shape, dtype=np.uint8)
corner = lom_radius
if exclusion_regions is not None:
sz, sy, sx = output.shape
hz, hy, hx = np.mgrid[:sz, :sy, :sx]
hz += corner[2]
hy += corner[1]
hx += corner[0]
for x, y, z, r in exclusion_regions:
mask = (hx - x)**2 + (hy - y)**2 + (hz - z)**2 <= r**2
output[mask] = 255
labels = set(np.unique(seg_array))
if _rank == 0:
logging.info('Number of labels: %d', len(labels))
logging.info(labels)
logging.info(id_whitelist)
if id_whitelist is not None:
id_whitelist = {int(x) for x in id_whitelist}
labels &= id_whitelist
if _rank == 0:
logging.info('Labels to process after whitelist: %d', len(labels))
mask = load_mask(mask_configs,
bounding_box.BoundingBox(
start=(0, 0, 0), size=seg_array.shape[::-1]),
lom_diam_zyx)
if mask is not None:
output[mask] = 255
fov_volume = np.prod(lom_diam_zyx)
labelsarray = np.array(list(labels),dtype=np.int32)
# Don't create a mask for the background component.
labelsarray = labelsarray[labelsarray != 0]
labelsarray.sort()
labelsarray = labelsarray[get_slice(len(labelsarray),_size,_rank)]
reducedoutput = np.zeros(seg_array[valid_sel].shape, dtype=np.uint8)
if _rank == 0:
logging.info('Labels to process: %d', len(labels))
logging.info('Labels to process on rank 0: %d', len(labelsarray))
for l in labelsarray:
tstart = time()
object_mask = (seg_array == l)
svt = _summed_volume_table(object_mask)
active_fraction = _query_summed_volume(svt, lom_diam_zyx) / fov_volume
assert active_fraction.shape == output.shape
# Drop context that is only necessary for computing the active fraction
# (i.e. one LOM radius in every direction).
object_mask = object_mask[valid_sel]
# TODO(mjanusz): Use np.digitize here.
for i, th in enumerate(thresholds):
output[object_mask & (active_fraction < th) & (output == 0)] = i + 1
output[object_mask & (active_fraction >= thresholds[-1]) &
(output == 0)] = len(thresholds) + 1
if _rank == 0:
logging.info('Done processing %d in %f seconds', l, time()-tstart)
_comm.Reduce(output,reducedoutput,MPI.SUM,root=0)
if _rank == 0:
logging.info('Nonzero values: %d', np.sum(output > 0))
return corner, reducedoutput
def adjust_bboxes(bboxes, lom_radius):
ret = []
for bbox in bboxes:
bbox = bbox.adjusted_by(start=lom_radius, end=-lom_radius)
if np.all(bbox.size > 0):
ret.append(bbox)
return ret
def main(argv):
del argv # Unused.
tzero = time()
path, dataset = FLAGS.input_volume.split(':')
if _rank == 0:
logging.info('Read hdf5 file {}'.format(path))
with h5py.File(path,'r') as f:
segmentation = f[dataset]
if _rank == 0:
logging.info('Done reading.')
bboxes = []
for name, v in segmentation.attrs.items():
if name.startswith('bounding_boxes'):
for bbox in v:
bboxes.append(bounding_box.BoundingBox(bbox[0], bbox[1]))
if not bboxes:
bboxes.append(
bounding_box.BoundingBox(
start=(0, 0, 0), size=segmentation.shape[::-1]))
shape = segmentation.shape
lom_radius = [int(x) for x in FLAGS.lom_radius]
if _rank == 0:
logging.info('Compute partitions')
    logging.info('Segmentation shape: {}'.format(shape))
logging.info('Bounding boxes: {}'.format(bboxes))
corner, partitions = compute_partitions(
segmentation[...], [float(x) for x in FLAGS.thresholds], lom_radius,
FLAGS.id_whitelist, FLAGS.exclusion_regions, FLAGS.mask_configs,
FLAGS.min_size)
bboxes = adjust_bboxes(bboxes, np.array(lom_radius))
path, dataset = FLAGS.output_volume.split(':')
if _rank == 0:
logging.info('Partition shape : {}'.format(partitions.shape))
logging.info('Bounding boxes : {}'.format(bboxes))
logging.info('Corner : {}'.format(corner))
logging.info('Creating hdf5 file for the partitions...')
with h5py.File(path, 'w') as f:
ds = f.create_dataset(dataset, shape=shape, dtype=np.uint8, fillvalue=255,
chunks=True, compression='gzip')
s = partitions.shape
ds[corner[2]:corner[2] + s[0],
corner[1]:corner[1] + s[1],
corner[0]:corner[0] + s[2]] = partitions
ds.attrs['bounding_boxes'] = [(b.start, b.size) for b in bboxes]
ds.attrs['partition_counts'] = np.array(np.unique(partitions,
return_counts=True))
logging.info('Finished in {} seconds.'.format(time()-tzero))
return 0
if __name__ == '__main__':
flags.mark_flag_as_required('input_volume')
flags.mark_flag_as_required('output_volume')
flags.mark_flag_as_required('thresholds')
flags.mark_flag_as_required('lom_radius')
app.run(main)
|
py | 7dfe2cb9b7aa567bd3394b1988171b2af233f782 | class Transition:
def __init__(self, action, state, index):
self.action = action
self.state = state
self.index = index
def __repr__(self):
return f'{self.state} : {self.action} {self.index}'
def __str__(self):
return f'{self.state}/{self.action}'.replace(' ', '_')
def __hash__(self):
return self.__str__().__hash__()
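# For example, Transition('a', 'q0', 0) reprs as 'q0 : a 0' and strs (and hashes)
# as 'q0/a'; two transitions from the same state on the same action therefore
# hash equally regardless of their index.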
|
py | 7dfe2cd6347b20fc9b61f9094ec883d16d6be361 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import traceback
from st2common import log as logging
from st2common.util import date as date_utils
from st2common.constants import action as action_constants
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.persistence.executionstate import ActionExecutionState
from st2common.services import access, executions
from st2common.util.action_db import (get_action_by_ref, get_runnertype_by_name)
from st2common.util.action_db import (update_liveaction_status, get_liveaction_by_id)
from st2actions.container.service import RunnerContainerService
from st2actions.runners import get_runner, AsyncActionRunner
from st2actions.utils import param_utils
LOG = logging.getLogger(__name__)
class RunnerContainer(object):
def __init__(self):
LOG.info('Action RunnerContainer instantiated.')
self._pending = []
def dispatch(self, liveaction_db):
action_db = get_action_by_ref(liveaction_db.action)
if not action_db:
raise Exception('Action %s not found in dB.' % liveaction_db.action)
runnertype_db = get_runnertype_by_name(action_db.runner_type['name'])
runner_type = runnertype_db.name
LOG.info('Dispatching Action to runner \n%s',
json.dumps(liveaction_db.to_serializable_dict(), indent=4))
LOG.debug(' liverunner_type: %s', runner_type)
LOG.debug(' RunnerType: %s', runnertype_db)
# Get runner instance.
runner = get_runner(runnertype_db.runner_module)
LOG.debug('Runner instance for RunnerType "%s" is: %s', runnertype_db.name, runner)
# Invoke pre_run, run, post_run cycle.
liveaction_db = self._do_run(runner, runnertype_db, action_db, liveaction_db)
LOG.debug('runner do_run result: %s', liveaction_db.result)
liveaction_serializable = liveaction_db.to_serializable_dict()
extra = {'liveaction_db': liveaction_db}
LOG.audit('liveaction complete.', extra=extra)
LOG.info('result :\n%s.', json.dumps(liveaction_serializable.get('result', None), indent=4))
return liveaction_db.result
def _do_run(self, runner, runnertype_db, action_db, liveaction_db):
# Finalized parameters are resolved and then rendered.
runner_params, action_params = param_utils.get_finalized_params(
runnertype_db.runner_parameters, action_db.parameters, liveaction_db.parameters)
resolved_entry_point = self._get_entry_point_abs_path(action_db.pack,
action_db.entry_point)
runner.container_service = RunnerContainerService()
runner.action = action_db
runner.action_name = action_db.name
runner.liveaction = liveaction_db
runner.liveaction_id = str(liveaction_db.id)
runner.entry_point = resolved_entry_point
runner.runner_parameters = runner_params
runner.context = getattr(liveaction_db, 'context', dict())
runner.callback = getattr(liveaction_db, 'callback', dict())
runner.libs_dir_path = self._get_action_libs_abs_path(action_db.pack,
action_db.entry_point)
runner.auth_token = self._create_auth_token(runner.context)
updated_liveaction_db = None
try:
# Finalized parameters are resolved and then rendered. This process could
# fail. Handle the exception and report the error correctly.
runner_params, action_params = param_utils.get_finalized_params(
runnertype_db.runner_parameters, action_db.parameters, liveaction_db.parameters)
runner.runner_parameters = runner_params
LOG.debug('Performing pre-run for runner: %s', runner)
runner.pre_run()
LOG.debug('Performing run for runner: %s', runner)
(status, result, context) = runner.run(action_params)
try:
result = json.loads(result)
            except (TypeError, ValueError):
pass
if (isinstance(runner, AsyncActionRunner) and
status not in action_constants.COMPLETED_STATES):
self._setup_async_query(liveaction_db.id, runnertype_db, context)
except:
LOG.exception('Failed to run action.')
_, ex, tb = sys.exc_info()
# mark execution as failed.
status = action_constants.LIVEACTION_STATUS_FAILED
# include the error message and traceback to try and provide some hints.
result = {'message': str(ex), 'traceback': ''.join(traceback.format_tb(tb, 20))}
context = None
finally:
# Always clean-up the auth_token
updated_liveaction_db = self._update_live_action_db(liveaction_db.id, status,
result, context)
executions.update_execution(updated_liveaction_db)
LOG.debug('Updated liveaction after run: %s', updated_liveaction_db)
# Deletion of the runner generated auth token is delayed until the token expires.
            # Async actions such as Mistral workflows use the auth token to launch other
# actions in the workflow. If the auth token is deleted here, then the actions
# in the workflow will fail with unauthorized exception.
if (not isinstance(runner, AsyncActionRunner) or
(isinstance(runner, AsyncActionRunner) and
status in action_constants.COMPLETED_STATES)):
try:
self._delete_auth_token(runner.auth_token)
except:
LOG.warn('Unable to clean-up auth_token.')
LOG.debug('Performing post_run for runner: %s', runner)
runner.post_run(status, result)
runner.container_service = None
return updated_liveaction_db
def _update_live_action_db(self, liveaction_id, status, result, context):
liveaction_db = get_liveaction_by_id(liveaction_id)
if status in action_constants.COMPLETED_STATES:
end_timestamp = date_utils.get_datetime_utc_now()
else:
end_timestamp = None
liveaction_db = update_liveaction_status(status=status,
result=result,
context=context,
end_timestamp=end_timestamp,
liveaction_db=liveaction_db)
return liveaction_db
def _get_entry_point_abs_path(self, pack, entry_point):
return RunnerContainerService.get_entry_point_abs_path(pack=pack,
entry_point=entry_point)
def _get_action_libs_abs_path(self, pack, entry_point):
return RunnerContainerService.get_action_libs_abs_path(pack=pack,
entry_point=entry_point)
def _create_auth_token(self, context):
if not context:
return None
user = context.get('user', None)
if not user:
return None
return access.create_token(user)
def _delete_auth_token(self, auth_token):
if auth_token:
access.delete_token(auth_token.token)
def _setup_async_query(self, liveaction_id, runnertype_db, query_context):
query_module = getattr(runnertype_db, 'query_module', None)
if not query_module:
LOG.error('No query module specified for runner %s.', runnertype_db)
return
try:
self._create_execution_state(liveaction_id, runnertype_db, query_context)
except:
LOG.exception('Unable to create action execution state db model ' +
'for liveaction_id %s', liveaction_id)
def _create_execution_state(self, liveaction_id, runnertype_db, query_context):
state_db = ActionExecutionStateDB(
execution_id=liveaction_id,
query_module=runnertype_db.query_module,
query_context=query_context)
try:
return ActionExecutionState.add_or_update(state_db)
except:
LOG.exception('Unable to create execution state db for liveaction_id %s.'
% liveaction_id)
return None
def get_runner_container():
return RunnerContainer()
|
py | 7dfe2d4c7c6721a539c5f8b40e5c514b7f7370f6 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: add_cluster_device.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='add_cluster_device.proto',
package='instance',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x18\x61\x64\x64_cluster_device.proto\x12\x08instance\"@\n\x17\x41\x64\x64\x43lusterDeviceRequest\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x11\n\tdeviceIds\x18\x02 \x01(\t\"V\n\x18\x41\x64\x64\x43lusterDeviceResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\t\"\x85\x01\n\x1f\x41\x64\x64\x43lusterDeviceResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x30\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\".instance.AddClusterDeviceResponseb\x06proto3')
)
_ADDCLUSTERDEVICEREQUEST = _descriptor.Descriptor(
name='AddClusterDeviceRequest',
full_name='instance.AddClusterDeviceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='instance.AddClusterDeviceRequest.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deviceIds', full_name='instance.AddClusterDeviceRequest.deviceIds', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=102,
)
_ADDCLUSTERDEVICERESPONSE = _descriptor.Descriptor(
name='AddClusterDeviceResponse',
full_name='instance.AddClusterDeviceResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.AddClusterDeviceResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.AddClusterDeviceResponse.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='instance.AddClusterDeviceResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.AddClusterDeviceResponse.data', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=190,
)
_ADDCLUSTERDEVICERESPONSEWRAPPER = _descriptor.Descriptor(
name='AddClusterDeviceResponseWrapper',
full_name='instance.AddClusterDeviceResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='instance.AddClusterDeviceResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='instance.AddClusterDeviceResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='instance.AddClusterDeviceResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='instance.AddClusterDeviceResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=193,
serialized_end=326,
)
_ADDCLUSTERDEVICERESPONSEWRAPPER.fields_by_name['data'].message_type = _ADDCLUSTERDEVICERESPONSE
DESCRIPTOR.message_types_by_name['AddClusterDeviceRequest'] = _ADDCLUSTERDEVICEREQUEST
DESCRIPTOR.message_types_by_name['AddClusterDeviceResponse'] = _ADDCLUSTERDEVICERESPONSE
DESCRIPTOR.message_types_by_name['AddClusterDeviceResponseWrapper'] = _ADDCLUSTERDEVICERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AddClusterDeviceRequest = _reflection.GeneratedProtocolMessageType('AddClusterDeviceRequest', (_message.Message,), {
'DESCRIPTOR' : _ADDCLUSTERDEVICEREQUEST,
'__module__' : 'add_cluster_device_pb2'
# @@protoc_insertion_point(class_scope:instance.AddClusterDeviceRequest)
})
_sym_db.RegisterMessage(AddClusterDeviceRequest)
AddClusterDeviceResponse = _reflection.GeneratedProtocolMessageType('AddClusterDeviceResponse', (_message.Message,), {
'DESCRIPTOR' : _ADDCLUSTERDEVICERESPONSE,
'__module__' : 'add_cluster_device_pb2'
# @@protoc_insertion_point(class_scope:instance.AddClusterDeviceResponse)
})
_sym_db.RegisterMessage(AddClusterDeviceResponse)
AddClusterDeviceResponseWrapper = _reflection.GeneratedProtocolMessageType('AddClusterDeviceResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _ADDCLUSTERDEVICERESPONSEWRAPPER,
'__module__' : 'add_cluster_device_pb2'
# @@protoc_insertion_point(class_scope:instance.AddClusterDeviceResponseWrapper)
})
_sym_db.RegisterMessage(AddClusterDeviceResponseWrapper)
# @@protoc_insertion_point(module_scope)
|
py | 7dfe2d711ccc2f0bf35ff686a172a1c578ed7ac7 | # -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
HERE = os.path.dirname(os.path.abspath(__file__))
CONFIG = {
'instances': [
{
'name': 'conn_error',
'url': 'https://thereisnosuchlink.com',
'check_certificate_expiration': False,
'timeout': 1,
},
{
'name': 'http_error_status_code',
'url': 'http://httpbin.org/404',
'check_certificate_expiration': False,
'timeout': 1,
},
{
'name': 'status_code_match',
'url': 'http://httpbin.org/404',
'http_response_status_code': '4..',
'check_certificate_expiration': False,
'timeout': 1,
'tags': ["foo:bar"],
},
{
'name': 'cnt_mismatch',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': 'thereisnosuchword',
},
{
'name': 'cnt_match',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': '(thereisnosuchword|github)',
},
{
'name': 'cnt_match_unicode',
'url': 'https://ja.wikipedia.org/',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': u'メインページ',
},
{
'name': 'cnt_mismatch_unicode',
'url': 'https://ja.wikipedia.org/',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': u'メインペーー',
},
{
'name': 'cnt_mismatch_reverse',
'url': 'https://github.com',
'timeout': 1,
'reverse_content_match': True,
'check_certificate_expiration': False,
'content_match': 'thereisnosuchword',
},
{
'name': 'cnt_match_reverse',
'url': 'https://github.com',
'timeout': 1,
'reverse_content_match': True,
'check_certificate_expiration': False,
'content_match': '(thereisnosuchword|github)',
},
{
'name': 'cnt_mismatch_unicode_reverse',
'url': 'https://ja.wikipedia.org/',
'timeout': 1,
'reverse_content_match': True,
'check_certificate_expiration': False,
'content_match': u'メインペーー',
},
{
'name': 'cnt_match_unicode_reverse',
'url': 'https://ja.wikipedia.org/',
'timeout': 1,
'reverse_content_match': True,
'check_certificate_expiration': False,
'content_match': u'メインページ',
},
]
}
CONFIG_E2E = {'init_config': {'ca_certs': '/opt/cacert.pem'}, 'instances': CONFIG['instances']}
CONFIG_SSL_ONLY = {
'instances': [
{
'name': 'good_cert',
'url': 'https://github.com:443',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7,
},
{
'name': 'cert_exp_soon',
'url': 'https://google.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 9999,
'days_critical': 7,
},
{
'name': 'cert_critical',
'url': 'https://google.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 9999,
'days_critical': 9999,
},
{
'name': 'conn_error',
'url': 'https://thereisnosuchlink.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7,
},
]
}
CONFIG_EXPIRED_SSL = {
'instances': [
{
'name': 'expired_cert',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7,
},
{
'name': 'expired_cert_seconds',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'seconds_warning': 3600,
'seconds_critical': 60,
},
]
}
CONFIG_CUSTOM_NAME = {
'instances': [
{
'name': 'cert_validation_fails',
'url': 'https://github.com:443',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7,
'ssl_server_name': 'incorrect_name',
},
{
'name': 'cert_validation_passes',
'url': 'https://github.com:443',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7,
'ssl_server_name': 'github.com',
},
]
}
CONFIG_UNORMALIZED_INSTANCE_NAME = {
'instances': [
{
'name': '_need-to__be_normalized-',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14,
'days_critical': 7,
}
]
}
CONFIG_DONT_CHECK_EXP = {
'instances': [{'name': 'simple_config', 'url': 'http://httpbin.org', 'check_certificate_expiration': False}]
}
CONFIG_HTTP_REDIRECTS = {
'instances': [
{
'name': 'redirect_service',
'url': 'http://github.com',
'timeout': 1,
'http_response_status_code': 301,
'allow_redirects': False,
}
]
}
FAKE_CERT = {'notAfter': 'Apr 12 12:00:00 2006 GMT'}
CONFIG_DATA_METHOD = {
'instances': [
{
'name': 'post_json',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'post',
'data': {'foo': 'bar', 'baz': ['qux', 'quux']},
},
{
'name': 'post_str',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'post',
'data': '<soap:Envelope xmlns:soap="http://www.w3.org/2003/05/soap-envelope"'
'xmlns:m="http://www.example.org/stocks"><soap:Header></soap:Header><soap:Body><m:GetStockPrice>'
'<m:StockName>EXAMPLE</m:StockName></m:GetStockPrice></soap:Body></soap:Envelope>',
},
{
'name': 'put_json',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'put',
'data': {'foo': 'bar', 'baz': ['qux', 'quux']},
},
{'name': 'put_str', 'url': 'http://mockbin.com/request', 'timeout': 1, 'method': 'put', 'data': 'Lorem ipsum'},
{
'name': 'patch_json',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'patch',
'data': {'foo': 'bar', 'baz': ['qux', 'quux']},
},
{
'name': 'patch_str',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'patch',
'data': 'Lorem ipsum',
},
{
'name': 'delete_json',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'delete',
'data': {'foo': 'bar', 'baz': ['qux', 'quux']},
},
{
'name': 'delete_str',
'url': 'http://mockbin.com/request',
'timeout': 1,
'method': 'delete',
'data': 'Lorem ipsum',
},
]
}
|
py | 7dfe2da4f7d40da10fab6ea3d7080d9b194178df | """
byceps.services.seating.dbmodels.seat
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from collections import namedtuple
from typing import Optional
from sqlalchemy.ext.hybrid import hybrid_property
from ....database import db, generate_uuid
from ....util.instances import ReprBuilder
from ...ticketing.dbmodels.category import Category
from ...ticketing.transfer.models import TicketCategoryID
from ..transfer.models import AreaID
from .area import Area
Point = namedtuple('Point', ['x', 'y'])
class Seat(db.Model):
"""A seat."""
__tablename__ = 'seats'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
area_id = db.Column(db.Uuid, db.ForeignKey('seating_areas.id'), index=True, nullable=False)
area = db.relationship(Area, backref='seats')
coord_x = db.Column(db.Integer, nullable=False)
coord_y = db.Column(db.Integer, nullable=False)
category_id = db.Column(db.Uuid, db.ForeignKey('ticket_categories.id'), index=True, nullable=False)
category = db.relationship(Category)
label = db.Column(db.UnicodeText, nullable=True)
type_ = db.Column('type', db.UnicodeText, nullable=True)
def __init__(
self,
area_id: AreaID,
category_id: TicketCategoryID,
*,
coord_x: int = 0,
coord_y: int = 0,
label: Optional[str] = None,
type_: Optional[str] = None,
) -> None:
self.area_id = area_id
self.coord_x = coord_x
self.coord_y = coord_y
self.category_id = category_id
self.label = label
self.type_ = type_
@hybrid_property
def coords(self) -> Point:
return Point(x=self.coord_x, y=self.coord_y)
@coords.setter
def coords(self, point: Point) -> None:
self.coord_x = point.x
self.coord_y = point.y
def __repr__(self) -> str:
return ReprBuilder(self) \
.add('id', str(self.id)) \
.add_with_lookup('area') \
.add_with_lookup('category') \
.add_with_lookup('label') \
.build()
|
py | 7dfe2e2fadfcabaab8e9b8c4494636b4eaaa564d | from ..base.problem_transformation import ProblemTransformationBase
from scipy.sparse import hstack
from sklearn.exceptions import NotFittedError
import copy
class ClassifierChain(ProblemTransformationBase):
"""Constructs a bayesian conditioned chain of per label classifiers
This class provides implementation of Jesse Read's problem
transformation method called Classifier Chains. For L labels it
trains L classifiers ordered in a chain according to the
`Bayesian chain rule`.
The first classifier is trained just on the input space, and then
each next classifier is trained on the input space and all previous
classifiers in the chain.
The default classifier chains follow the same ordering as provided
in the training set, i.e. label in column 0, then 1, etc.
Parameters
----------
classifier : :class:`~sklearn.base.BaseEstimator`
scikit-learn compatible base classifier
require_dense : [bool, bool], optional
whether the base classifier requires dense representations
for input features and classes/labels matrices in fit/predict.
If value not provided, sparse representations are used if base classifier is
an instance of :class:`~skmultilearn.base.MLClassifierBase` and dense otherwise.
order : List[int], permutation of ``range(n_labels)``, optional
the order in which the chain should go through labels, the default is ``range(n_labels)``
Attributes
----------
classifiers_ : List[:class:`~sklearn.base.BaseEstimator`] of shape `n_labels`
list of classifiers trained per partition, set in :meth:`fit`
References
----------
If used, please cite the scikit-multilearn library and the relevant paper:
.. code-block:: bibtex
@inproceedings{read2009classifier,
title={Classifier chains for multi-label classification},
author={Read, Jesse and Pfahringer, Bernhard and Holmes, Geoff and Frank, Eibe},
booktitle={Joint European Conference on Machine Learning and Knowledge Discovery in Databases},
pages={254--269},
year={2009},
organization={Springer}
}
Examples
--------
An example use case for Classifier Chains
with an :class:`sklearn.svm.SVC` base classifier which supports sparse input:
.. code-block:: python
from skmultilearn.problem_transform import ClassifierChain
from sklearn.svm import SVC
# initialize Classifier Chain multi-label classifier
# with an SVM classifier
# SVM in scikit only supports the X matrix in sparse representation
classifier = ClassifierChain(
classifier = SVC(),
require_dense = [False, True]
)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
Another way to use this classifier is to select the best scenario from a set of single-label classifiers used
with Classifier Chain, this can be done using cross validation grid search. In the example below, the model
with highest accuracy results is selected from either a :class:`sklearn.naive_bayes.MultinomialNB` or
:class:`sklearn.svm.SVC` base classifier, alongside with best parameters for that base classifier.
.. code-block:: python
from skmultilearn.problem_transform import ClassifierChain
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
parameters = [
{
'classifier': [MultinomialNB()],
'classifier__alpha': [0.7, 1.0],
},
{
'classifier': [SVC()],
'classifier__kernel': ['rbf', 'linear'],
},
]
clf = GridSearchCV(ClassifierChain(), parameters, scoring='accuracy')
clf.fit(x, y)
print (clf.best_params_, clf.best_score_)
# result
# {'classifier': MultinomialNB(alpha=0.7, class_prior=None, fit_prior=True), 'classifier__alpha': 0.7} 0.16
"""
def __init__(self, classifier=None, require_dense=None, order=None):
super(ClassifierChain, self).__init__(classifier, require_dense)
self.order = order
self.copyable_attrs = ['classifier', 'require_dense', 'order']
def fit(self, X, y, order=None):
"""Fits classifier to training data
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
Notes
-----
.. note :: Input matrices are converted to sparse format internally if a numpy representation is passed
"""
# fit L = len(y[0]) BR classifiers h_i
        # on X extended with y[:, :i] as input space and y[:, i] as output
X_extended = self._ensure_input_format(X, sparse_format='csc', enforce_sparse=True)
y = self._ensure_output_format(y, sparse_format='csc', enforce_sparse=True)
self._label_count = y.shape[1]
self.classifiers_ = [None for x in range(self._label_count)]
for label in self._order():
self.classifier = copy.deepcopy(self.classifier)
y_subset = self._generate_data_subset(y, label, axis=1)
self.classifiers_[label] = self.classifier.fit(self._ensure_input_format(
X_extended), self._ensure_output_format(y_subset))
X_extended = hstack([X_extended, y_subset])
return self
def predict(self, X):
"""Predict labels for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
"""
X_extended = self._ensure_input_format(
X, sparse_format='csc', enforce_sparse=True)
for label in self._order():
prediction = self.classifiers_[label].predict(
self._ensure_input_format(X_extended))
prediction = self._ensure_multi_label_from_single_class(prediction)
X_extended = hstack([X_extended, prediction])
return X_extended[:, -self._label_count:]
def predict_proba(self, X):
"""Predict probabilities of label assignments for X
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
Returns
-------
:mod:`scipy.sparse` matrix of `float in [0.0, 1.0]`, shape=(n_samples, n_labels)
matrix with label assignment probabilities
"""
X_extended = self._ensure_input_format(
X, sparse_format='csc', enforce_sparse=True)
results = []
for label in self._order():
prediction = self.classifiers_[label].predict(
self._ensure_input_format(X_extended))
prediction = self._ensure_output_format(
prediction, sparse_format='csc', enforce_sparse=True)
prediction_proba = self.classifiers_[label].predict_proba(
self._ensure_input_format(X_extended))
prediction_proba = self._ensure_output_format(
prediction_proba, sparse_format='csc', enforce_sparse=True)[:, 1]
X_extended = hstack([X_extended, prediction]).tocsc()
results.append(prediction_proba)
return hstack(results)
def _order(self):
if self.order is not None:
return self.order
try:
return list(range(self._label_count))
except AttributeError:
raise NotFittedError("This Classifier Chain has not been fit yet")
|
py | 7dfe2ee5e45fe94e89cc2c39dee4c62ff5588ec8 | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#493. Reverse Pairs
#Given an array nums, we call (i, j) an important reverse pair if i < j and nums[i] > 2*nums[j].
#You need to return the number of important reverse pairs in the given array.
#Example1:
#Input: [1,3,2,3,1]
#Output: 2
#Example2:
#Input: [2,4,3,5,1]
#Output: 3
#Note:
#The length of the given array will not exceed 50,000.
#All the numbers in the input array are in the range of 32-bit integer.
#class Solution(object):
# def reversePairs(self, nums):
# """
# :type nums: List[int]
# :rtype: int
# """
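# A minimal sketch of one common O(n log n) approach (not part of the template above):
# merge-sort the array, counting cross pairs between the two sorted halves with a
# second pointer before merging. The function name and in-place sorting are
# illustrative choices, not LeetCode's required API.
def count_reverse_pairs(nums):
    def sort_count(lo, hi):
        if hi - lo <= 1:
            return 0
        mid = (lo + hi) // 2
        count = sort_count(lo, mid) + sort_count(mid, hi)
        j = mid
        for i in range(lo, mid):
            # both halves are already sorted, so j only ever moves forward
            while j < hi and nums[i] > 2 * nums[j]:
                j += 1
            count += j - mid
        nums[lo:hi] = sorted(nums[lo:hi])  # simplified merge step (mutates nums)
        return count
    return sort_count(0, len(nums))
# count_reverse_pairs([1, 3, 2, 3, 1]) -> 2; count_reverse_pairs([2, 4, 3, 5, 1]) -> 3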
# Time Is Money |
py | 7dfe2fbd80c6f95e172c66d0ef87f14b6c2014f7 | import os
import requests
class Translator:
def __init__(self):
pass
def translate(self, text: str, from_lang='en', to_lang='ru') -> str:
        raise NotImplementedError()
class YandexTranslator(Translator):
def __init__(self, api_key=None):
super().__init__()
self.api_key = api_key or os.environ.get("YANDEX_API_KEY")
self.url = "https://translate.yandex.net/api/v1.5/tr.json/translate"
def translate(self, text: str, from_lang='en', to_lang='ru'):
if self.api_key:
querystring = {
"key": self.api_key,
"text": text,
"lang": "{}-{}".format(from_lang, to_lang)
}
response = requests.request("GET", self.url, params=querystring)
resp = response.json()
return resp['text'][0]
else:
return None
if __name__ == '__main__':
translator = YandexTranslator()
ru_text = translator.translate("However there are only a few controlled studies which confirm this finding.")
print(ru_text)
|
py | 7dfe2fde22022233d2a63d5bdf71d02a1c140381 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Linh Pham
# wwdtm_winstreaks is released under the terms of the Apache License 2.0
"""Calculate and display panelist win streaks from scores in the WWDTM
Stats database"""
from collections import OrderedDict
import json
import math
import os
from typing import List, Dict
import mysql.connector
from mysql.connector.errors import DatabaseError, ProgrammingError
import numpy
def retrieve_all_scores(database_connection: mysql.connector.connect
) -> List[int]:
"""Retrieve a list of all panelist scores from non-Best Of and
non-Repeat shows"""
cursor = database_connection.cursor()
query = ("SELECT pm.panelistscore FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE s.bestof = 0 AND s.repeatshowid IS NULL "
"AND pm.panelistscore IS NOT NULL "
"ORDER BY pm.panelistscore ASC;")
cursor.execute(query)
result = cursor.fetchall()
if not result:
return None
scores = []
for row in result:
scores.append(row[0])
return scores
def retrieve_grouped_scores(database_connection: mysql.connector.connect
) -> Dict:
"""Retrieve a list of grouped panelist scores from non-Best Of and
non-Repeat shows"""
cursor = database_connection.cursor()
query = ("SELECT pm.panelistscore, COUNT(pm.panelistscore) "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"WHERE pm.panelistscore IS NOT NULL "
"AND s.bestof = 0 AND s.repeatshowid IS NULL "
"GROUP BY pm.panelistscore "
"ORDER BY pm.panelistscore ASC;")
cursor.execute(query)
result = cursor.fetchall()
if not result:
return None
scores = []
for row in result:
score = OrderedDict()
score["score"] = row[0]
score["count"] = row[1]
scores.append(score)
return scores
def calculate_stats(scores: List[int]):
"""Calculate stats for all of the panelist scores"""
stats = OrderedDict()
stats["count"] = len(scores)
stats["minimum"] = int(numpy.amin(scores))
stats["maximum"] = int(numpy.amax(scores))
stats["mean"] = round(numpy.mean(scores), 4)
stats["median"] = int(numpy.median(scores))
stats["standard_deviation"] = round(numpy.std(scores), 4)
stats["total"] = int(numpy.sum(scores))
return stats
def print_stats(stats: Dict):
"""Print out the score stats"""
print()
print(" Panelist Scores")
print(" Count: {}".format(stats["count"]))
print(" Minimum: {}".format(stats["minimum"]))
print(" Maximum: {}".format(stats["maximum"]))
print(" Median: {}".format(stats["median"]))
print(" Mean: {}".format(stats["mean"]))
print(" Std Dev: {}".format(stats["standard_deviation"]))
print(" Total: {}".format(stats["total"]))
print("\n\n")
return
def print_score_spread(score_spread: Dict):
"""Print out the scrore spread"""
print(" Score Spread\n")
print(" Score Count")
for score in score_spread:
print(" {:>7}{:>12}".format(score["score"], score["count"]))
print()
return
def load_config(app_environment) -> Dict:
"""Load configuration file from config.json"""
with open('config.json', 'r') as config_file:
config_dict = json.load(config_file)
if app_environment.startswith("develop"):
if "development" in config_dict:
config = config_dict["development"]
else:
raise Exception("Missing 'development' section in config file")
elif app_environment.startswith("prod"):
if "production" in config_dict:
config = config_dict['production']
else:
raise Exception("Missing 'production' section in config file")
else:
if "local" in config_dict:
config = config_dict["local"]
else:
raise Exception("Missing 'local' section in config file")
return config
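# Hedged illustration (not part of the original script): config.json is assumed to hold one
# section per APP_ENV value ("local", "development", "production"), each containing a
# "database" dict that main() passes straight to mysql.connector.connect(); the key names
# below are examples only.
#
#   {
#     "local": {
#       "database": {"host": "localhost", "user": "wwdtm", "password": "secret", "database": "wwdtm"}
#     }
#   }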
def main():
"""Pull in scoring data and generate stats based on the data"""
app_environment = os.getenv("APP_ENV", "local").strip().lower()
config = load_config(app_environment)
database_connection = mysql.connector.connect(**config["database"])
all_scores = retrieve_all_scores(database_connection)
stats = calculate_stats(all_scores)
print_stats(stats)
grouped_scores = retrieve_grouped_scores(database_connection)
print_score_spread(grouped_scores)
return None
# Only run if executed as a script and not imported
if __name__ == "__main__":
main()
|
py | 7dfe305e96a0979866df2fac41a996766cfe26e5 | from django.conf.urls import include, url # noqa
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
urlpatterns = [
url(r"^admin/", admin.site.urls),
url(r"^$", TemplateView.as_view(template_name="index.html"), name="index"),
path("", include("todos.urls"))
]
|
py | 7dfe30d8065fb60b73456503700795b3cbf21c75 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-05 18:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
|
py | 7dfe3186f8a8101fe611a6926b38ca1df625f579 | # -*- coding: utf-8 -*-
import sys
#FIXME: use PYTHONPATH with module directory
sys.path.append('../pytamil')
from pytamil import தமிழ்
from தமிழ் import இலக்கணம் as இல
from தமிழ் import புணர்ச்சி
from தமிழ் import எழுத்து
from தமிழ் import மாத்திரை
# print( எழுத்து.எழுத்துக்கள்['மெல்லினம்'])
# print( எழுத்து.எழுத்துக்கள்['குறில்'] )
# print (தமிழ்.வட்டெழுத்து('வணக்கம்'))
# print ( எழுத்து.தொடர்மொழி_ஆக்கு('விருந்து', 'ஓம்பல்' ))
# print( இல.தொடர்மொழி_ஆக்கு('விருந்து', 'ஓம்பல்'))
# print( இல.தொடர்மொழி_ஆக்கு('மெய்', 'எழுத்து'))
# print( இல.தொடர்மொழி_ஆக்கு('மெய்', 'பழுத்து'))
# print( இல.தொடர்மொழி_ஆக்கு('முள்', 'இலை'))
# print( இல.தொடர்மொழி_ஆக்கு('உயிர்', 'எழுத்து'))
# print( இல.தொடர்மொழி_ஆக்கு('வேல்', 'எறிந்தான்'))
# விதிகள் =[]
# விதிகள் = getவிதிகள்(entries,விதிகள்)
# சான்றுகள் = []
# சான்றுகள் = getசான்றுகள்(entries, சான்றுகள்)
# print(விதிகள்)
# print(சான்றுகள்)
# result = புணர்ச்சி.check("(...)(இ,ஈ,ஐ)" ,"மணிதன்")
# print (result)
# result = புணர்ச்சி.check("(...)(உயிர்)" , "மணி")
# print (result)
# result = புணர்ச்சி.check("(உயிர்)(...)" , "அடி")
# print (result)
# print(புணர்ச்சி.தொடர்மொழி_ஆக்கு( 'உயிர்' , 'எழுத்து'))
# புணர்ச்சி.புணர்ச்சிசெய்('''சே|உடம்படுமெய்(ய்)|சும்மா + சும்மா|திரிதல்(வ்)|அடி ,
# சே|உடம்படுமெய்(வ்) + திரிதல்(வ்)|அடி,
# சே|உடம்படுமெய்(வ்) + திரிதல்(வ்)|அடி ''')
# புணர்ச்சி.புணர்ச்சிசெய்('''சே|உடம்படுமெய்(ய்)|சும்மா + சும்மா|திரிதல்(வ்)|அடி ,
# சே|உடம்படுமெய்(வ்) + திரிதல்(வ்)|அடி''')
# புணர்ச்சி.புணர்ச்சிசெய்('சேய் +இயல்பு+ அவ்')
# புணர்ச்சி.தொடர்மொழி_ஆக்கு('சே', 'அடி' )
# புணர்ச்சி.தொடர்மொழி_ஆக்கு('கண்', 'மங்கியது')
# print(மாத்திரை.மாத்திரை_கொடு('புணர்ச்சிசெய்'))
# print(மாத்திரை.மொத்தமாத்திரை('புணர்ச்சிசெய்'))
# print(புணர்ச்சி.தொடர்மொழி_ஆக்கு( 'மணி' , 'அடித்தான்'))
# print(புணர்ச்சி.தொடர்மொழி_ஆக்கு( 'மெய்', 'எழுத்து'))
print(புணர்ச்சி.தொடர்மொழி_ஆக்கு( 'நிலா', 'ஒளி'))
|
py | 7dfe33d7dbc223b9c3bb7c3c20adc4ac87a74a2a | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from aenum import Enum
from .. import statics
from ..statics import long
class Traversal(object):
def __init__(self, graph, traversal_strategies, bytecode):
self.graph = graph
self.traversal_strategies = traversal_strategies
self.bytecode = bytecode
self.side_effects = TraversalSideEffects()
self.traversers = None
self.last_traverser = None
def __repr__(self):
return str(self.bytecode)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.bytecode == other.bytecode
else:
return False
def __iter__(self):
return self
def __next__(self):
if self.traversers is None:
self.traversal_strategies.apply_strategies(self)
if self.last_traverser is None:
self.last_traverser = next(self.traversers)
object = self.last_traverser.object
self.last_traverser.bulk = self.last_traverser.bulk - 1
if self.last_traverser.bulk <= 0:
self.last_traverser = None
return object
def toList(self):
return list(iter(self))
def toSet(self):
return set(iter(self))
def iterate(self):
self.bytecode.add_step("none")
while True:
try: self.nextTraverser()
except StopIteration: return self
def nextTraverser(self):
if self.traversers is None:
self.traversal_strategies.apply_strategies(self)
if self.last_traverser is None:
return next(self.traversers)
else:
temp = self.last_traverser
self.last_traverser = None
return temp
def hasNext(self):
if self.traversers is None:
self.traversal_strategies.apply_strategies(self)
if self.last_traverser is None:
try: self.last_traverser = next(self.traversers)
except StopIteration: return False
return not(self.last_traverser is None) and self.last_traverser.bulk > 0
def next(self, amount=None):
if amount is None:
return self.__next__()
else:
count = 0
tempList = []
while count < amount:
count = count + 1
try: temp = self.__next__()
except StopIteration: return tempList
tempList.append(temp)
return tempList
def promise(self, cb=None):
self.traversal_strategies.apply_async_strategies(self)
future_traversal = self.remote_results
future = type(future_traversal)()
def process(f):
try:
traversal = f.result()
except Exception as e:
future.set_exception(e)
else:
self.traversers = iter(traversal.traversers)
self.side_effects = traversal.side_effects
if cb:
try:
result = cb(self)
except Exception as e:
future.set_exception(e)
else:
future.set_result(result)
else:
future.set_result(self)
future_traversal.add_done_callback(process)
return future
Barrier = Enum('Barrier', ' normSack')
statics.add_static('normSack', Barrier.normSack)
Cardinality = Enum('Cardinality', ' list_ set_ single')
statics.add_static('single', Cardinality.single)
statics.add_static('list_', Cardinality.list_)
statics.add_static('set_', Cardinality.set_)
Column = Enum('Column', ' keys values')
statics.add_static('keys', Column.keys)
statics.add_static('values', Column.values)
Direction = Enum('Direction', ' BOTH IN OUT')
statics.add_static('OUT', Direction.OUT)
statics.add_static('IN', Direction.IN)
statics.add_static('BOTH', Direction.BOTH)
GraphSONVersion = Enum('GraphSONVersion', ' V1_0 V2_0 V3_0')
statics.add_static('V1_0', GraphSONVersion.V1_0)
statics.add_static('V2_0', GraphSONVersion.V2_0)
statics.add_static('V3_0', GraphSONVersion.V3_0)
GryoVersion = Enum('GryoVersion', ' V1_0 V3_0')
statics.add_static('V1_0', GryoVersion.V1_0)
statics.add_static('V3_0', GryoVersion.V3_0)
Order = Enum('Order', ' asc decr desc incr shuffle')
statics.add_static('incr', Order.incr)
statics.add_static('decr', Order.decr)
statics.add_static('shuffle', Order.shuffle)
statics.add_static('asc', Order.asc)
statics.add_static('desc', Order.desc)
Pick = Enum('Pick', ' any none')
statics.add_static('any', Pick.any)
statics.add_static('none', Pick.none)
Pop = Enum('Pop', ' all_ first last mixed')
statics.add_static('first', Pop.first)
statics.add_static('last', Pop.last)
statics.add_static('all_', Pop.all_)
statics.add_static('mixed', Pop.mixed)
Scope = Enum('Scope', ' global_ local')
statics.add_static('global_', Scope.global_)
statics.add_static('local', Scope.local)
T = Enum('T', ' id id_ key label value')
statics.add_static('id', T.id)
statics.add_static('label', T.label)
statics.add_static('id_', T.id_)
statics.add_static('key', T.key)
statics.add_static('value', T.value)
Operator = Enum('Operator', ' addAll and_ assign div max max_ min min_ minus mult or_ sum sum_ sumLong')
statics.add_static('sum_', Operator.sum_)
statics.add_static('sum', Operator.sum_)
statics.add_static('minus', Operator.minus)
statics.add_static('mult', Operator.mult)
statics.add_static('div', Operator.div)
statics.add_static('min', Operator.min_)
statics.add_static('min_', Operator.min_)
statics.add_static('max_', Operator.max_)
statics.add_static('assign', Operator.assign)
statics.add_static('and_', Operator.and_)
statics.add_static('or_', Operator.or_)
statics.add_static('addAll', Operator.addAll)
statics.add_static('sumLong', Operator.sumLong)
class P(object):
def __init__(self, operator, value, other=None):
self.operator = operator
self.value = value
self.other = other
@staticmethod
def between(*args):
return P("between", *args)
@staticmethod
def eq(*args):
return P("eq", *args)
@staticmethod
def gt(*args):
return P("gt", *args)
@staticmethod
def gte(*args):
return P("gte", *args)
@staticmethod
def inside(*args):
return P("inside", *args)
@staticmethod
def lt(*args):
return P("lt", *args)
@staticmethod
def lte(*args):
return P("lte", *args)
@staticmethod
def neq(*args):
return P("neq", *args)
@staticmethod
def not_(*args):
return P("not", *args)
@staticmethod
def outside(*args):
return P("outside", *args)
@staticmethod
def test(*args):
return P("test", *args)
@staticmethod
def within(*args):
if len(args) == 1 and type(args[0]) == list:
return P("within", args[0])
else:
return P("within", list(args))
@staticmethod
def without(*args):
if len(args) == 1 and type(args[0]) == list:
return P("without", args[0])
else:
return P("without", list(args))
def and_(self, arg):
return P("and", self, arg)
def or_(self, arg):
return P("or", self, arg)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.operator == other.operator and self.value == other.value and self.other == other.other
def __repr__(self):
return self.operator + "(" + str(self.value) + ")" if self.other is None else self.operator + "(" + str(self.value) + "," + str(self.other) + ")"
def between(*args):
return P.between(*args)
def eq(*args):
return P.eq(*args)
def gt(*args):
return P.gt(*args)
def gte(*args):
return P.gte(*args)
def inside(*args):
return P.inside(*args)
def lt(*args):
return P.lt(*args)
def lte(*args):
return P.lte(*args)
def neq(*args):
return P.neq(*args)
def not_(*args):
return P.not_(*args)
def outside(*args):
return P.outside(*args)
def test(*args):
return P.test(*args)
def within(*args):
return P.within(*args)
def without(*args):
return P.without(*args)
statics.add_static('between', between)
statics.add_static('eq', eq)
statics.add_static('gt', gt)
statics.add_static('gte', gte)
statics.add_static('inside', inside)
statics.add_static('lt', lt)
statics.add_static('lte', lte)
statics.add_static('neq', neq)
statics.add_static('not_', not_)
statics.add_static('outside', outside)
statics.add_static('test', test)
statics.add_static('within', within)
statics.add_static('without', without)
class TextP(P):
def __init__(self, operator, value, other=None):
P.__init__(self, operator, value, other)
@staticmethod
def containing(*args):
return TextP("containing", *args)
@staticmethod
def endingWith(*args):
return TextP("endingWith", *args)
@staticmethod
def notContaining(*args):
return TextP("notContaining", *args)
@staticmethod
def notEndingWith(*args):
return TextP("notEndingWith", *args)
@staticmethod
def notStartingWith(*args):
return TextP("notStartingWith", *args)
@staticmethod
def startingWith(*args):
return TextP("startingWith", *args)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.operator == other.operator and self.value == other.value and self.other == other.other
def __repr__(self):
return self.operator + "(" + str(self.value) + ")" if self.other is None else self.operator + "(" + str(self.value) + "," + str(self.other) + ")"
def containing(*args):
return TextP.containing(*args)
def endingWith(*args):
return TextP.endingWith(*args)
def notContaining(*args):
return TextP.notContaining(*args)
def notEndingWith(*args):
return TextP.notEndingWith(*args)
def notStartingWith(*args):
return TextP.notStartingWith(*args)
def startingWith(*args):
return TextP.startingWith(*args)
statics.add_static('containing', containing)
statics.add_static('endingWith', endingWith)
statics.add_static('notContaining', notContaining)
statics.add_static('notEndingWith', notEndingWith)
statics.add_static('notStartingWith', notStartingWith)
statics.add_static('startingWith', startingWith)
'''
IO
'''
class IO(object):
graphml = "graphml"
graphson = "graphson"
gryo = "gryo"
reader = "~tinkerpop.io.reader"
registry = "~tinkerpop.io.registry"
writer = "~tinkerpop.io.writer"
'''
ConnectedComponent
'''
class ConnectedComponent(object):
component = "gremlin.connectedComponentVertexProgram.component"
edges = "~tinkerpop.connectedComponent.edges"
propertyName = "~tinkerpop.connectedComponent.propertyName"
'''
ShortestPath
'''
class ShortestPath(object):
distance = "~tinkerpop.shortestPath.distance"
edges = "~tinkerpop.shortestPath.edges"
includeEdges = "~tinkerpop.shortestPath.includeEdges"
maxDistance = "~tinkerpop.shortestPath.maxDistance"
target = "~tinkerpop.shortestPath.target"
'''
PageRank
'''
class PageRank(object):
edges = "~tinkerpop.pageRank.edges"
propertyName = "~tinkerpop.pageRank.propertyName"
times = "~tinkerpop.pageRank.times"
'''
PeerPressure
'''
class PeerPressure(object):
edges = "~tinkerpop.peerPressure.edges"
propertyName = "~tinkerpop.peerPressure.propertyName"
times = "~tinkerpop.peerPressure.times"
'''
TRAVERSER
'''
class Traverser(object):
def __init__(self, object, bulk=None):
if bulk is None:
bulk = long(1)
self.object = object
self.bulk = bulk
def __repr__(self):
return str(self.object)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.object == other.object
'''
TRAVERSAL SIDE-EFFECTS
'''
class TraversalSideEffects(object):
def keys(self):
return set()
def get(self, key):
raise KeyError(key)
def __getitem__(self, key):
return self.get(key)
def __repr__(self):
return "sideEffects[size:" + str(len(self.keys())) + "]"
'''
TRAVERSAL STRATEGIES
'''
class TraversalStrategies(object):
global_cache = {}
def __init__(self, traversal_strategies=None):
self.traversal_strategies = traversal_strategies.traversal_strategies if traversal_strategies is not None else []
def add_strategies(self, traversal_strategies):
self.traversal_strategies = self.traversal_strategies + traversal_strategies
def apply_strategies(self, traversal):
for traversal_strategy in self.traversal_strategies:
traversal_strategy.apply(traversal)
def apply_async_strategies(self, traversal):
for traversal_strategy in self.traversal_strategies:
traversal_strategy.apply_async(traversal)
def __repr__(self):
return str(self.traversal_strategies)
class TraversalStrategy(object):
def __init__(self, strategy_name=None, configuration=None, fqcn=None):
self.fqcn = fqcn
self.strategy_name = type(self).__name__ if strategy_name is None else strategy_name
self.configuration = {} if configuration is None else configuration
def apply(self, traversal):
return
def apply_async(self, traversal):
return
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return hash(self.strategy_name)
def __repr__(self):
return self.strategy_name
'''
BYTECODE
'''
class Bytecode(object):
def __init__(self, bytecode=None):
self.source_instructions = []
self.step_instructions = []
self.bindings = {}
if bytecode is not None:
self.source_instructions = list(bytecode.source_instructions)
self.step_instructions = list(bytecode.step_instructions)
def add_source(self, source_name, *args):
instruction = [source_name]
for arg in args:
instruction.append(self.__convertArgument(arg))
self.source_instructions.append(instruction)
def add_step(self, step_name, *args):
instruction = [step_name]
for arg in args:
instruction.append(self.__convertArgument(arg))
self.step_instructions.append(instruction)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.source_instructions == other.source_instructions and self.step_instructions == other.step_instructions
else:
return False
def __convertArgument(self,arg):
if isinstance(arg, Traversal):
self.bindings.update(arg.bytecode.bindings)
return arg.bytecode
elif isinstance(arg, dict):
newDict = {}
for key in arg:
newDict[self.__convertArgument(key)] = self.__convertArgument(arg[key])
return newDict
elif isinstance(arg, list):
newList = []
for item in arg:
newList.append(self.__convertArgument(item))
return newList
elif isinstance(arg, set):
newSet = set()
for item in arg:
newSet.add(self.__convertArgument(item))
return newSet
elif isinstance(arg, Binding):
self.bindings[arg.key] = arg.value
return Binding(arg.key, self.__convertArgument(arg.value))
else:
return arg
def __repr__(self):
return (str(self.source_instructions) if len(self.source_instructions) > 0 else "") + \
(str(self.step_instructions) if len(self.step_instructions) > 0 else "")
'''
BINDINGS
'''
class Bindings(object):
@staticmethod
def of(key, value):
if not isinstance(key, str):
raise TypeError("Key must be str")
return Binding(key, value)
class Binding(object):
def __init__(self, key, value):
self.key = key
self.value = value
def __eq__(self, other):
return isinstance(other, self.__class__) and self.key == other.key and self.value == other.value
def __hash__(self):
return hash(self.key) + hash(self.value)
def __repr__(self):
return "binding[" + self.key + "=" + str(self.value) + "]"
'''
WITH OPTIONS
'''
class WithOptions(object):
tokens = "~tinkerpop.valueMap.tokens"
none = 0
ids = 1
labels = 2
keys = 4
values = 8
all = 15
indexer = "~tinkerpop.index.indexer"
list = 0
map = 1
|
py | 7dfe33df9d2e0aca53324cf39a47018935f9184f |
# Define these below batch to avoid circular dependency
from typecraft_python.integrations.obt.tagger import ObtTagger
from typecraft_python.integrations.nltk.tagger import NltkTagger
from typecraft_python.integrations.treetagger import TreeTagger
import six
def batch(iterable, n=1):
length = len(iterable)
for next_index in range(0, length, n):
yield iterable[next_index:min(next_index + n, length)]
# Taken from https://stackoverflow.com/questions/2130016/splitting-a-list-into-n-parts-of-approximately-equal-length
def split(a, n):
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
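# Hedged usage sketch (illustrative only): batch() yields fixed-size chunks with a shorter
# final chunk, while split() yields n nearly equal parts covering the whole sequence.
#
#   list(batch([1, 2, 3, 4, 5], n=2))   ->  [[1, 2], [3, 4], [5]]
#   list(split([1, 2, 3, 4, 5], 2))     ->  [[1, 2, 3], [4, 5]]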
TAGGER_TRANSLATIONS = {
'tree': TreeTagger,
'obt': ObtTagger,
'nltk': NltkTagger
}
def get_tagger_by_name(name):
assert isinstance(name, six.string_types)
name_lower = name.lower()
if 'tree' in name_lower:
return TreeTagger
if 'nl' in name_lower:
return NltkTagger
if 'ob' in name_lower:
return ObtTagger
raise ValueError("Tagger %s not found" % (name,))
|
py | 7dfe3562106d1a54a24101ebe6768e060f4b46d2 | import click
from gravity import config_manager
from gravity import options
@click.command("reregister")
@options.required_config_arg(name="old_config")
@options.required_config_arg(name="new_config", exists=True)
@click.pass_context
def cli(ctx, old_config, new_config):
"""Update path of registered config file.
aliases: rename
"""
with config_manager.config_manager(state_dir=ctx.parent.state_dir) as cm:
cm.rename(old_config, new_config)
|
py | 7dfe3565d9f5b6cba9d2f6607e767cb8743c3494 | from rlbench.environment import Environment
from rlbench.action_modes import ActionMode, ArmActionMode
from rlbench.tasks import FS10_V1
from rlbench.tasks.open_drawer import OpenDrawer
from rlbench.tasks import ReachTarget
from rlbench.observation_config import ObservationConfig, CameraConfig
import numpy as np
import skvideo.io
live_demos = True
DATASET = '' if live_demos else 'PATH/TO/YOUR/DATASET'
save_to_dir = 'gitignore/rbench/reach_'
camera_config = CameraConfig(image_size=(500, 300))
obs_config = ObservationConfig()
obs_config.set_all(False)
obs_config.left_shoulder_camera = camera_config
obs_config.right_shoulder_camera = camera_config
obs_config.set_all_low_dim(True)
action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
env = Environment(action_mode, DATASET, obs_config, False)
env.launch()
# train_tasks = FS10_V1['train']
# test_tasks = FS10_V1['test']
# task_to_train = np.random.choice(train_tasks, 1)[0]
# import ipdb; ipdb.set_trace()
# print(action_mode.action_size)
task = env.get_task(ReachTarget)
task.sample_variation() # random variation
descriptions, obs = task.reset()
# obs, reward, terminate = task.step(np.random.normal(size=action_mode.action_size))
for j in range(3, 10):
demos = task.get_demos(1, live_demos=True) # -> List[List[Observation]]
demos = np.array(demos).flatten()
np.save(save_to_dir + "demos_%d.npy" % j, demos)
d = demos
obs_right = []
obs_left = []
for i in range(len(d)):
obs_left.append(d[i].left_shoulder_rgb)
obs_right.append(d[i].right_shoulder_rgb)
videodata = (np.array(obs_left) * 255).astype(int)
filename = save_to_dir + "demo_left_%d.mp4" % j
skvideo.io.vwrite(filename, videodata)
videodata = (np.array(obs_right) * 255).astype(int)
filename = save_to_dir + "demo_right_%d.mp4" % j
skvideo.io.vwrite(filename, videodata)
|
py | 7dfe35a9709b8b7510936965111d85ba86149f53 | import support_functions
import vm_functions
import unittest
version_good = "6.1.26r145957"
vm_good = "ws2019"
vm_bad = "bad"
snapshot_good = "live"
snapshot_bad = "bad"
file_good = "./firefox.exe"
file_bad = "./bad.exe"
file_dst = "C:\\windows\\temp\\file.exe"
user_good = "Administrator"
pass_good = "12345678"
user_bad = "bad"
pass_bad = "bad"
ips_good = ["10.0.2.15", "192.168.56.113"]
class TestStringMethods(unittest.TestCase):
# vm_functions options
vm_functions.logging.disable()
vm_functions.vboxmanage_path = "vboxmanage"
vm_functions.timeout = 60
def test01_file_info(self):
result = support_functions.file_info(file_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1],"f2d2638afb528c7476c9ee8e83ddb20e686b0b05f53f2f966fd9eb962427f8aa",)
self.assertEqual(result[2], "374fb48a959a96ce92ae0e4346763293")
self.assertEqual(result[3], 1070)
def test02_file_info_nonexisted(self):
result = support_functions.file_info(file_bad)
self.assertEqual(result, 1)
def test03_virtualbox_version(self):
result = vm_functions.virtualbox_version()
self.assertEqual(result[0], 0)
self.assertEqual(result[1], version_good)
self.assertEqual(result[2], "")
def test04_vm_start(self):
vm_functions.vm_stop(vm_good)
result = vm_functions.vm_start(vm_good)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 0)
self.assertRegex(result[1], f'VM "{vm_good}" has been successfully started.')
self.assertEqual(result[2], "")
def test05_vm_start_running(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_start(vm_good)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "is already locked by a session")
def test06_vm_start_nonexisting(self):
result = vm_functions.vm_start(vm_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "Could not find a registered machine")
def test07_vm_upload(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_upload(vm_good, user_good, pass_good, file_good, file_dst)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], "")
self.assertEqual(result[2], "")
def test08_vm_upload_nonexisting_file(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_upload(vm_good, user_good, pass_good, file_bad, file_dst)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "VERR_FILE_NOT_FOUND")
def test09_vm_upload_incorrect_credentials(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_upload(vm_good, user_bad, pass_bad, file_good, file_dst)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "The specified user was not able to logon on guest")
def test10_vm_download_incorrect_credentials(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_download(vm_good, user_good, pass_bad, file_bad, file_dst)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "The specified user was not able to logon on guest")
def test11_vm_download_nonexisting_file(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_download(vm_good, user_good, pass_good, file_dst, file_bad)
vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "Querying guest file information failed")
def test12_vm_stop(self):
vm_functions.vm_start(vm_good)
result = vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "100%")
def test13_vm_stop_stopped(self):
vm_functions.vm_stop(vm_good)
result = vm_functions.vm_stop(vm_good)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "VBOX_E_INVALID_VM_STATE")
def test14_vm_snapshot_restore_good(self):
result = vm_functions.vm_snapshot_restore(vm_good, snapshot_good)
self.assertEqual(result[0], 0)
self.assertRegex(result[1], "Restoring snapshot")
self.assertRegex(result[2], "100%")
def test15_vm_snapshot_restore_nonexisting_a(self):
result = vm_functions.vm_snapshot_restore(vm_good, snapshot_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "Could not find a snapshot")
def test16_vm_snapshot_restore_nonexisting_b(self):
result = vm_functions.vm_snapshot_restore(vm_bad, snapshot_bad)
self.assertEqual(result[0], 1)
self.assertEqual(result[1], "")
self.assertRegex(result[2], "Could not find a registered machine")
def test17_list_ips(self):
vm_functions.vm_start(vm_good)
result = vm_functions.list_ips(vm_good)
self.assertEqual(result[0], 0)
self.assertEqual(result[1], ips_good)
self.assertEqual(result[2], "")
if __name__ == "__main__":
unittest.main()
|
py | 7dfe3629745df4f80e8302f7f4e5175a151698a8 | from point.util.queue import Queue
#from geweb import log
from point.util.env import env
from hashlib import md5
from urllib import quote_plus
try:
import re2 as re
except ImportError:
import re
import settings
# utilities
def make_avatar(path, filename, remove=True, old=None):
queue = Queue('imgq', settings.imgproc_socket)
queue.push({'fn': 'avatar',
'path': path, 'filename': filename, 'remove': remove, 'old': old})
def move_avatar(old, new):
queue = Queue('imgq', settings.imgproc_socket)
queue.push({'fn': 'move_avatar', 'old': old, 'new': new})
def remove_avatar(filename):
queue = Queue('imgq', settings.imgproc_socket)
queue.push({'fn': 'remove_avatar', 'filename': filename})
def make_attach(path, dest, filename, remove=True):
queue = Queue('imgq', settings.imgproc_socket)
queue.push({'fn': 'attach',
'path': path, 'dest': dest, 'filename': filename,
'remove': remove})
def remove_attach(filename):
queue = Queue('imgq', settings.imgproc_socket)
queue.push({'fn': 'remove_attach', 'filename': filename})
def make_thumbnail(url):
queue = Queue('imgq', settings.imgproc_socket)
queue.push({'fn': 'thumbnail', 'url': url})
def imgproc_url(url):
if isinstance(url, unicode):
url = url.encode('utf-8')
h = md5(re.sub('"', '%22', url)).hexdigest()
return 'https%s/%s/%s?u=%s' % (settings.thumbnail_root, h[:2], h,
quote_plus(url))
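# Illustrative sketch (added, not part of the original module): how imgproc_url shards
# thumbnail URLs into md5-based buckets. `thumbnail_root` below is a made-up placeholder
# for settings.thumbnail_root, which is assumed to carry its own '://host' prefix.
def _example_imgproc_url(url='http://example.org/pic.jpg',
                         thumbnail_root='://thumbs.example.org'):
    h = md5(re.sub('"', '%22', url)).hexdigest()
    # the first two hex chars pick the bucket, the full digest names the thumbnail,
    # and the original URL rides along as a query parameter
    return 'https%s/%s/%s?u=%s' % (thumbnail_root, h[:2], h, quote_plus(url))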
|
py | 7dfe373b454ee5f984d781dd9befcb0ff6f2c19d | """ Crypto.py:
Set of crypto functions and operations for Google Maps APIs
"""
__author__ = "Faissal Elamraoui"
__copyright__ = "Copyright 2015, [email protected]"
__license__ = "The MIT Licence"
__version__ = "1.0"
__maintainer__ = "Faissal Elamraoui"
__email__ = "[email protected]"
__status__ = "Production"
import base64
import hashlib
import hmac
""" Signs the path+query part of the URL using the provided private key.
:param urlToSign: the path+query part of the URL
:param privateKey: the base64 encoded binary secret
:return string: base64 encoded signature
"""
def url_signer(urlToSign, privateKey):
signature = hmac.new(base64.urlsafe_b64decode(privateKey), urlToSign, hashlib.sha1)
return base64.urlsafe_b64encode(signature.digest())
""" URL encodes the parameters.
:param params: The parameters
:return string: URL encoded parameters
"""
def urlencode_params(params):
params = sorted(params.items())
return "&".join("%s=%s" % (k,v) for k,v in params)
|
py | 7dfe38650c274d2159715dc7c186dffa5d770548 | """
WSGI config for djangokam project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# djangokam directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "djangokam"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
py | 7dfe38ac8dd46ec98191d3a9c1b232ee0abb5b7c | from __future__ import print_function
import sys
from pyspark.sql import SparkSession
#<1> Import the print() function
#<2> Import System-specific parameters and functions
#<3> Import SparkSession from the pyspark.sql module
#===================
# function: `create_pair` to accept
# a String object as "key,number" and
# returns a (key, number) pair.
#
# record as String of "key,number"
def create_pair(record):
tokens = record.split(",")
# key -> tokens[0] as String
# number -> tokens[1] as Integer
return (tokens[0], int(tokens[1]))
# end-of-function
#===================
# function: `add_pairs` accept two
# tuples of (sum1, count1) and (sum2, count2)
# and returns sum of tuples (sum1+sum2, count1+count2).
#
# a = (sum1, count1)
# b = (sum2, count2)
def add_pairs(a, b):
# sum = sum1+sum2
sum = a[0] + b[0]
# count = count1+count2
count = a[1] + b[1]
return (sum, count)
# end-of-function
#===================
if __name__ == '__main__':
# <4>
if len(sys.argv) != 2:
print("Usage: ", __file__, " <input-path>", file=sys.stderr)
exit(-1)
# <5>
spark = SparkSession\
.builder\
.appName("average_monoid_use_aggregatebykey")\
.getOrCreate()
# sys.argv[0] is the name of the script.
# sys.argv[1] is the first parameter
# <6>
input_path = sys.argv[1]
print("input_path: {}".format(input_path))
# read input and create an RDD<String>
# <7>
records = spark.sparkContext.textFile(input_path)
print("records.count(): ", records.count())
print("records.collect(): ", records.collect())
# create a pair of (key, number) for "key,number"
# <8>
pairs = records.map(create_pair)
print("pairs.count(): ", pairs.count())
print("pairs.collect(): ", pairs.collect())
#============================================================
# aggregateByKey(
# zeroValue,
# seqFunc,
# combFunc,
# numPartitions=None,
# partitionFunc=<function portable_hash at 0x7f51f1ac0668>
# )
#
# Aggregate the values of each key, using given combine
# functions and a neutral "zero value". This function can
# return a different result type, U, than the type of the
# values in this RDD, V. Thus, we need one operation (seqFunc)
# for merging a V into a U and one operation (combFunc) for
# merging two U's. The former operation is used for merging
# values within a partition, and the latter is used for merging
# values between partitions. To avoid memory allocation, both
# of these functions are allowed to modify and return their
# first argument instead of creating a new U.
#
# RDD<K,U> aggregateByKey(
# U zero_value,
# Function2<U,V,U> seqFunc,
# Function2<U,U,U> combFunc
# )
#============================================================
# aggregate the (sum, count) of each unique key
# <9>
# U is a pair (sum, count)
# zero_value = (0, 0) = (local_sum, local_count)
zero_value = (0, 0)
sum_count = pairs.aggregateByKey(\
zero_value,\
lambda U, v: (U[0]+v, U[1]+1),\
lambda U1, U2: (U1[0]+U2[0], U1[1]+U2[1])\
)
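    # Illustrative trace (added for clarity, not in the original script): for input
    # records ["a,1", "a,5", "b,4"], create_pair yields [("a", 1), ("a", 5), ("b", 4)] and
    # aggregateByKey with zero_value=(0, 0) produces
    #   ("a", (6, 2))   # sum = 1 + 5, count = 2
    #   ("b", (4, 1))
    # so the mapValues(sum/count) step below yields ("a", 3.0) and ("b", 4.0).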
#
print("sum_count.count(): ", sum_count.count())
print("sum_count.collect(): ", sum_count.collect())
# create the final RDD as RDD[key, average]
# <10>
# v = (v[0], v[1]) = (sum, count)
averages = sum_count.mapValues(lambda v : float(v[0]) / float(v[1]))
print("averages.count(): ", averages.count())
print("averages.collect(): ", averages.collect())
# done!
spark.stop()
#end-program
#<4> Make sure that we have 2 parameters in the command line
#<5> Create an instance of a SparkSession object by using the builder pattern SparkSession.builder class
#<6> Define input path (this can be a file or a directory containing any number of files)
#<7> Read input and create the first RDD as RDD[String] where each object has this format: "key,number"
#<8> Create (key, value) pairs RDD as (key, number)
#<9> Use aggregateByKey() to create (key, (sum, count)) per key
#<10> Apply the mapValues() transformation to find final average per key
|
py | 7dfe38b0948142c03ec2180007dc9d39ad60eec3 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, DefaultDict, Iterable, Tuple
from pants.backend.java.subsystems.java_infer import JavaInferSubsystem
from pants.build_graph.address import Address
from pants.engine.rules import collect_rules, rule
from pants.engine.target import AllTargets, Targets
from pants.jvm.dependency_inference.jvm_artifact_mappings import JVM_ARTIFACT_MAPPINGS
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import (
JvmArtifactArtifactField,
JvmArtifactGroupField,
JvmArtifactPackagesField,
JvmProvidesTypesField,
JvmResolveField,
)
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
_ResolveName = str
@dataclass(frozen=True)
class UnversionedCoordinate:
group: str
artifact: str
@classmethod
def from_coord_str(cls, coord: str) -> UnversionedCoordinate:
coordinate_parts = coord.split(":")
if len(coordinate_parts) != 2:
raise ValueError(f"Invalid coordinate specifier: {coord}")
return UnversionedCoordinate(group=coordinate_parts[0], artifact=coordinate_parts[1])
class AvailableThirdPartyArtifacts(
FrozenDict[
Tuple[_ResolveName, UnversionedCoordinate], Tuple[Tuple[Address, ...], Tuple[str, ...]]
]
):
"""Maps coordinates and resolve names to target `Address`es and declared packages."""
class MutableTrieNode:
__slots__ = [
"children",
"recursive",
"addresses",
"first_party",
] # don't use a `dict` to store attrs
def __init__(self):
self.children: dict[str, MutableTrieNode] = {}
self.recursive: bool = False
self.addresses: OrderedSet[Address] = OrderedSet()
self.first_party: bool = False
def ensure_child(self, name: str) -> MutableTrieNode:
if name in self.children:
return self.children[name]
node = MutableTrieNode()
self.children[name] = node
return node
@frozen_after_init
class FrozenTrieNode:
__slots__ = [
"_is_frozen",
"_children",
"_recursive",
"_addresses",
"_first_party",
] # don't use a `dict` to store attrs (speeds up attr access significantly)
def __init__(self, node: MutableTrieNode) -> None:
children = {}
for key, child in node.children.items():
children[key] = FrozenTrieNode(child)
self._children: FrozenDict[str, FrozenTrieNode] = FrozenDict(children)
self._recursive: bool = node.recursive
self._addresses: FrozenOrderedSet[Address] = FrozenOrderedSet(node.addresses)
self._first_party: bool = node.first_party
def find_child(self, name: str) -> FrozenTrieNode | None:
return self._children.get(name)
@property
def recursive(self) -> bool:
return self._recursive
@property
def first_party(self) -> bool:
return self._first_party
@property
def addresses(self) -> FrozenOrderedSet[Address]:
return self._addresses
def __hash__(self) -> int:
return hash((self._children, self._recursive, self._addresses))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, FrozenTrieNode):
return False
return (
self._children == other._children
and self.recursive == other.recursive
and self.addresses == other.addresses
)
def __repr__(self):
return f"FrozenTrieNode(children={repr(self._children)}, recursive={self._recursive}, addresses={self._addresses}, first_party={self._first_party})"
class AllJvmArtifactTargets(Targets):
pass
class AllJvmTypeProvidingTargets(Targets):
pass
@rule(desc="Find all jvm_artifact targets in project", level=LogLevel.DEBUG)
def find_all_jvm_artifact_targets(targets: AllTargets) -> AllJvmArtifactTargets:
return AllJvmArtifactTargets(
tgt for tgt in targets if tgt.has_fields((JvmArtifactGroupField, JvmArtifactArtifactField))
)
@rule(desc="Find all targets with experimental_provides fields in project", level=LogLevel.DEBUG)
def find_all_jvm_provides_fields(targets: AllTargets) -> AllJvmTypeProvidingTargets:
return AllJvmTypeProvidingTargets(
tgt
for tgt in targets
if tgt.has_field(JvmProvidesTypesField) and tgt[JvmProvidesTypesField].value is not None
)
@dataclass(frozen=True)
class ThirdPartyPackageToArtifactMapping:
mapping_roots: FrozenDict[_ResolveName, FrozenTrieNode]
def addresses_for_symbol(self, symbol: str, resolve: str) -> FrozenOrderedSet[Address]:
imp_parts = symbol.split(".")
# Note that it's possible to have a resolve with no associated artifacts.
current_node = self.mapping_roots.get(resolve)
if not current_node:
return FrozenOrderedSet()
found_nodes = []
for imp_part in imp_parts:
child_node_opt = current_node.find_child(imp_part)
if not child_node_opt:
break
found_nodes.append(child_node_opt)
current_node = child_node_opt
if not found_nodes:
return FrozenOrderedSet()
# If the length of the found nodes equals the number of parts of the package path, then
# there is an exact match.
if len(found_nodes) == len(imp_parts):
best_match = found_nodes[-1]
if best_match.first_party:
return FrozenOrderedSet() # The first-party symbol mapper should provide this dep
return found_nodes[-1].addresses
# Otherwise, check for the first found node (in reverse order) to match recursively, and
# use its coordinate.
for found_node in reversed(found_nodes):
if found_node.recursive:
return found_node.addresses
# Nothing matched so return no match.
return FrozenOrderedSet()
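    # Illustrative walk-through (added comment, not in the Pants source): with a mapping
    # built from a hypothetical package pattern "com.fasterxml.jackson.**" pointing at one
    # jvm_artifact address, addresses_for_symbol("com.fasterxml.jackson.databind.ObjectMapper",
    # resolve="jvm-default") descends the trie node by node, fails to find a "databind"
    # child, then falls back to the deepest node marked recursive ("jackson") and returns
    # that node's addresses. The pattern and resolve name here are examples only.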
@rule
async def find_available_third_party_artifacts(
all_jvm_artifact_tgts: AllJvmArtifactTargets, jvm: JvmSubsystem
) -> AvailableThirdPartyArtifacts:
address_mapping: DefaultDict[
tuple[_ResolveName, UnversionedCoordinate], OrderedSet[Address]
] = defaultdict(OrderedSet)
package_mapping: DefaultDict[
tuple[_ResolveName, UnversionedCoordinate], OrderedSet[str]
] = defaultdict(OrderedSet)
for tgt in all_jvm_artifact_tgts:
coord = UnversionedCoordinate(
group=tgt[JvmArtifactGroupField].value, artifact=tgt[JvmArtifactArtifactField].value
)
resolve = tgt[JvmResolveField].normalized_value(jvm)
key = (resolve, coord)
address_mapping[key].add(tgt.address)
package_mapping[key].update(tgt[JvmArtifactPackagesField].value or ())
return AvailableThirdPartyArtifacts(
{
key: (tuple(addresses), tuple(package_mapping[key]))
for key, addresses in address_mapping.items()
}
)
@rule
async def compute_java_third_party_artifact_mapping(
java_infer_subsystem: JavaInferSubsystem,
available_artifacts: AvailableThirdPartyArtifacts,
all_jvm_type_providing_tgts: AllJvmTypeProvidingTargets,
) -> ThirdPartyPackageToArtifactMapping:
"""Implements the mapping logic from the `jvm_artifact` and `java-infer` help."""
def insert(
mapping: MutableTrieNode,
package_pattern: str,
addresses: Iterable[Address],
first_party: bool,
) -> None:
imp_parts = package_pattern.split(".")
recursive = False
if imp_parts[-1] == "**":
recursive = True
imp_parts = imp_parts[0:-1]
current_node = mapping
for imp_part in imp_parts:
child_node = current_node.ensure_child(imp_part)
current_node = child_node
current_node.addresses.update(addresses)
current_node.first_party = first_party
current_node.recursive = recursive
# Build a default mapping from coord to package.
# TODO: Consider inverting the definitions of these mappings.
default_coords_to_packages: dict[UnversionedCoordinate, OrderedSet[str]] = defaultdict(
OrderedSet
)
for package, unversioned_coord_str in {
**JVM_ARTIFACT_MAPPINGS,
**java_infer_subsystem.third_party_import_mapping,
}.items():
unversioned_coord = UnversionedCoordinate.from_coord_str(unversioned_coord_str)
default_coords_to_packages[unversioned_coord].add(package)
# Build mappings per resolve from packages to addresses.
mappings: DefaultDict[_ResolveName, MutableTrieNode] = defaultdict(MutableTrieNode)
for (resolve_name, coord), (addresses, packages) in available_artifacts.items():
if not packages:
# If no packages were explicitly defined, fall back to our default mapping.
packages = tuple(default_coords_to_packages[coord])
if not packages:
# Default to exposing the `group` name as a package.
packages = (f"{coord.group}.**",)
mapping = mappings[resolve_name]
for package in packages:
insert(mapping, package, addresses, first_party=False)
# Mark types that have strong first-party declarations as first-party
for tgt in all_jvm_type_providing_tgts:
for provides_type in tgt[JvmProvidesTypesField].value or []:
for mapping in mappings.values():
insert(mapping, provides_type, [], first_party=True)
return ThirdPartyPackageToArtifactMapping(
FrozenDict(
(resolve_name, FrozenTrieNode(mapping)) for resolve_name, mapping in mappings.items()
)
)
def rules():
return collect_rules()
|
py | 7dfe396d761cdc56d016243df9c317a64577f823 | # -*- coding: utf-8 -*-
"""
Widgets module.
This module provides the Widget class and a real-time method, used to register
an instance of Widget as real-time. The instance has to be registered at
compile time in order for Django to know the URL used to return contents.
"""
from __future__ import unicode_literals
from hashlib import sha256
from .apps import AppSettings
app_settings = AppSettings()
REALTIME_WIDGETS = []
def realtime(widget, url_name=None, url_regex=None, time_interval=None):
"""
Return a widget as real-time.
Args:
widget (Widget): the widget to register and return as real-time.
url_name (str): the URL name to call to get updated content.
url_regex (regex): the URL regex to be matched.
time_interval (int): the interval of refreshment in milliseconds.
Returns:
Widget: the "real-timed" widget.
"""
if not hasattr(widget, 'get_updated_content'):
raise AttributeError('Widget %s must implement get_updated_content '
'method.' % widget)
elif not callable(widget.get_updated_content):
raise ValueError('get_updated_content in widget %s is not callable'
% widget)
if url_name is None:
if getattr(widget, 'url_name', None) is not None:
url_name = widget.url_name
else:
url_name = widget.__class__.__name__
if url_name in [w.url_name for w in REALTIME_WIDGETS]:
raise ValueError('URL name %s is already used by another '
'real time widget.' % url_name)
if url_regex is None:
if getattr(widget, 'url_regex', None) is not None:
url_regex = widget.url_regex
else:
url_regex = sha256(url_name.encode('utf-8'))
url_regex = url_regex.hexdigest()[:32]
url_regex = 'realtime/' + url_regex
if url_regex in [w.url_regex for w in REALTIME_WIDGETS]:
raise ValueError('URL regex %s is already used by another '
'real time widget.' % url_regex)
if time_interval is None:
if getattr(widget, 'time_interval', None) is not None:
time_interval = widget.time_interval
else:
time_interval = app_settings.default_time_interval
from django.views.generic import View
from braces.views import AjaxResponseMixin, JSONResponseMixin
# pylama:ignore=C0111,R0201
class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):
def get_data(self):
return widget.get_updated_content()
def get(self, request, *args, **kwargs):
return self.get_ajax(request, *args, **kwargs)
def get_ajax(self, request, *args, **kwargs):
return self.render_json_response(self.get_data())
PartialResponse.url_name = url_name
PartialResponse.url_regex = url_regex
PartialResponse.time_interval = time_interval
REALTIME_WIDGETS.append(PartialResponse)
if not hasattr(widget, 'url_name'):
widget.url_name = url_name
if not hasattr(widget, 'url_regex'):
widget.url_regex = url_regex
if not hasattr(widget, 'time_interval'):
widget.time_interval = time_interval
return widget
class Widget(object):
"""Widget class."""
def __init__(self,
html_id=None,
name=None,
content=None,
template=None,
classes=None,
**kwargs):
"""
Init method.
Args:
html_id (str): an ID to set on the HTML item.
name (str): the name of the item, displayed in HTML.
content (): suitable content according to chosen display.
template (str): the template responsible for display.
classes (str): additional classes to pass to the HTML item.
"""
if html_id is not None:
try:
self.html_id = html_id
except AttributeError:
self._html_id = html_id
if name is not None:
try:
self.name = name
except AttributeError:
self._name = name
if content is not None:
try:
self.content = content
except AttributeError:
self._content = content
if template is not None:
try:
self.template = template
except AttributeError:
self._template = template
if classes is not None:
try:
self.classes = classes
except AttributeError:
self._classes = classes
if not hasattr(self, 'template'):
raise AttributeError('template is a required widget attribute')
for kw, arg in kwargs.items():
setattr(self, kw, arg)
def get_updated_content(self):
"""Return updated content (for real-time widgets)."""
return self.content
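# Illustrative usage (added, not part of the original module): defining a widget and
# registering it as real-time. The template path and refresh interval are made-up values;
# the only hard requirements enforced above are a `template` attribute and a callable
# `get_updated_content`.
def _example_register_clock_widget():
    import datetime

    class ClockWidget(Widget):
        template = 'widgets/clock.html'  # hypothetical template

        def get_updated_content(self):
            return {'now': datetime.datetime.utcnow().isoformat()}

    # Registers a PartialResponse view under a sha256-derived 'realtime/...' URL regex and
    # returns the same instance with url_name/url_regex/time_interval attached.
    return realtime(ClockWidget(name='clock'), time_interval=5000)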
|
py | 7dfe399e4172c342ee87954502e6160452db3942 | import pytest
from pymoku.instruments import WaveformGenerator
try:
from unittest.mock import ANY
except ImportError:
from mock import ANY
@pytest.fixture
def dut(moku):
i = WaveformGenerator()
moku.deploy_instrument(i)
moku.reset_mock()
return i
def test_set_defaults(dut, moku):
'''
TODO Default test
'''
dut.set_defaults()
moku._write_regs.assert_called_with(ANY)
def test_gen_sinewave(dut, moku):
'''
TODO Default test
'''
dut.gen_sinewave(1, 1.0, 100e3)
moku._write_regs.assert_called_with(ANY)
def test_gen_squarewave(dut, moku):
'''
TODO Default test
'''
dut.gen_squarewave(1, 1.0, 100e3)
moku._write_regs.assert_called_with(ANY)
def test_gen_rampwave(dut, moku):
'''
TODO Default test
'''
dut.gen_rampwave(1, 1.0, 100e3)
moku._write_regs.assert_called_with(ANY)
def test_gen_off(dut, moku):
'''
TODO Default test
'''
dut.gen_off()
moku._write_regs.assert_called_with(ANY)
def test_set_trigger(dut, moku):
'''
TODO Default test
'''
dut.set_trigger(1, 'sweep', sweep_start_freq=10.0, sweep_end_freq=100.0)
moku._write_regs.assert_called_with(ANY)
def test_gen_modulate_off(dut, moku):
'''
TODO Default test
'''
dut.gen_modulate_off()
moku._write_regs.assert_called_with(ANY)
def test_gen_trigger_off(dut, moku):
'''
TODO Default test
'''
dut.gen_trigger_off()
moku._write_regs.assert_called_with(ANY)
def test_set_modulate_trig_off(dut, moku):
'''
TODO Default test
'''
dut.set_modulate_trig_off()
moku._write_regs.assert_called_with(ANY)
def test_gen_modulate(dut, moku):
'''
TODO Default test
'''
dut.gen_modulate(1, 'amplitude', 'adc1', 0.5)
moku._write_regs.assert_called_with(ANY)
@pytest.mark.parametrize('attr, value', [
('adc1_statuslight', 0),
('amod_enable_ch1', 0),
('fmod_enable_ch1', 0),
('pmod_enable_ch1', 0),
('sweep_enable_ch1', 0),
('reverse_sweep_ch1', 0),
('mod_source_ch1', 0),
('atten_compensate_ch1', 0),
('trig_source_ch1', 0),
('range_shift_ch1', 0),
('sine_trigdly_ch1', 0),
('phasedly_en_ch1', 0),
('trig_sweep_mode_ch1', 0),
('gate_mode_ch1', 0),
('mod_depth_ch1', 0),
('gate_thresh_ch1', 0),
('enable_ch1', 0),
('waveform_type_ch1', 0),
('amplitude_ch1', 1.0),
('offset_ch1', 0.0),
('t0_ch1', 1.0),
('t1_ch1', 1.0),
('t2_ch1', 1.0),
('riserate_ch1', 1.0),
('fallrate_ch1', 1.0),
('enable_reset_ch1', 0),
('phase_dly_ch1', 0),
('adc2_statuslight', 0),
('amod_enable_ch2', 0),
('fmod_enable_ch2', 0),
('pmod_enable_ch2', 0),
('sweep_enable_ch2', 0),
('reverse_sweep_ch2', 0),
('mod_source_ch2', 0),
('atten_compensate_ch2', 0),
('trig_source_ch2', 0),
('range_shift_ch2', 0),
('sine_trigdly_ch2', 0),
('phasedly_en_ch2', 0),
('trig_sweep_mode_ch2', 0),
('gate_mode_ch2', 0),
('mod_depth_ch2', 0),
('gate_thresh_ch2', 0),
('enable_ch2', 0),
('waveform_type_ch2', 0),
('amplitude_ch2', 1.0),
('offset_ch2', 0.0),
('t0_ch2', 1.0),
('t1_ch2', 1.0),
('t2_ch2', 1.0),
('riserate_ch2', 1.0),
('fallrate_ch2', 1.0),
('enable_reset_ch2', 1),
('phase_dly_ch2', 1),
])
def test_attributes(dut, moku, attr, value):
'''
TODO Default test
'''
setattr(dut, attr, value)
dut.commit()
moku._write_regs.assert_called_with(ANY)
|
py | 7dfe39fe28b12f446015a332cfeed3a6b5502433 | # Copyright (c) 2019. Partners HealthCare, Harvard Medical School’s
# Department of Biomedical Informatics, Sergey Trifonov
#
# Developed by Sergey Trifonov and Michael Bouzinier, based on contributions by:
# Anwoy Kumar Mohanty, Andrew Bjonnes,
# Ignat Leshchiner, Shamil Sunyaev and other members of Division of Genetics,
# Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pysam, json
from glob import glob
import numpy as np
from typing import List
from denovo2.adlib.ad_lib import AD_LibReader
from denovo2.hg_conv import Hg19_38
#========================================
class PysamList:
def __init__(self, file_with_list_of_filenames: str = None,
list_of_bams: List = None):
self.mSamFiles = []
if (file_with_list_of_filenames):
list_of_bams = []
with open(file_with_list_of_filenames, "r") as inp:
for line in inp:
filename = line.partition('#')[0].strip()
if not filename:
continue
list_of_bams.append(filename)
for filename in list_of_bams:
samfile = pysam.AlignmentFile(filename, "rb")
print("Load pysam file:", filename, "\n",
samfile.check_index())
self.mSamFiles.append(samfile)
def mineAD(self, variant):
ADfs, ADrs = [], []
for samfile in self.mSamFiles:
ADf, ADr = mineAD_ord(samfile, variant)
if ADf is None:
return None, None
ADfs.append(ADf)
ADrs.append(ADr)
if len(ADfs) == 0:
return None, None
return np.array(ADfs), np.array(ADrs)
#========================================
def _pileup(samfile, chrom, pos_from, pos_to):
sam_chrom = (str(chrom) if 0 < chrom <= 22
else {0: "M", 23: "X", 24: "Y"}[chrom])
try:
return samfile.pileup("chr" + sam_chrom, pos_from, pos_to)
except ValueError:
pass
if chrom == 0:
sam_chrom = "MT"
return samfile.pileup(sam_chrom, pos_from, pos_to)
#========================================
MQ_thresh = -100.
BQ_thresh = -100.
#========================================
def mineAD_ord(samfile, variant):
    global MQ_thresh, BQ_thresh
ADf, ADr = np.array([0., 0.]), np.array([0., 0.])
if variant.getBaseRef() == "hg38":
pos = Hg19_38.convertPos(
variant.getChromNum(), variant.getPos())
if pos is None:
return None, None
position = pos - 1
else:
position = variant.getPos() - 1
for pileupcolumn in _pileup(samfile, variant.getChromNum(),
position, position + 1):
if pileupcolumn.pos != position:
continue
for pileupread in pileupcolumn.pileups:
if pileupread.is_del or pileupread.is_refskip:
continue
q_pos = pileupread.query_position
MQ = pileupread.alignment.mapping_quality
BQ = pileupread.alignment.query_qualities[q_pos]
if MQ < MQ_thresh or BQ < BQ_thresh:
continue
if (variant.getRef().upper()
== pileupread.alignment.query_sequence[q_pos].upper()):
if pileupread.alignment.is_reverse:
ADr[0] += 1
else:
ADf[0] += 1
else:
if pileupread.alignment.is_reverse:
ADr[1] += 1
else:
ADf[1] += 1
return ADf,ADr
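# Added note: ADf/ADr above are [ref_count, alt_count] tallies for reads on the forward and
# reverse strands respectively; reads that are deletions, refskips or fail the MQ/BQ
# thresholds are excluded. This summary is inferred from the loop above.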
#========================================
class AD_LibCollection:
def __init__(self, lib_dir, dump_file = None):
self.mLibSeq = []
for fname in sorted(list(glob(lib_dir + "/*.ldx"))):
self.mLibSeq.append(AD_LibReader(fname))
self.mDumpFile = dump_file
self.mDumpDict = dict()
def _nextPortions(self):
return self.mLibSeq[0]._nextPortions()
def mineAD(self, variant):
ADfs, ADrs = [], []
for lib in self.mLibSeq:
seq = lib.getAD_seq(variant.getChromNum(), variant.getPos())
if seq:
for fam_vec in seq:
ADfs.append(fam_vec[0])
ADrs.append(fam_vec[1])
if self.mDumpFile:
key = "%d/%d" % (variant.getChromNum(), variant.getPos())
if key not in self.mDumpDict:
self.mDumpDict[key] = [
[[vec[0], vec[1]] for vec in ADfs],
[[vec[0], vec[1]] for vec in ADrs]]
if len(ADfs) == 0:
return None, None
return np.array(ADfs), np.array(ADrs)
def finishUp(self):
if self.mDumpFile:
with open(self.mDumpFile, "w") as outp:
outp.write(json.dumps(self.mDumpDict,
indent = 4, sort_keys = True))
#========================================
|
py | 7dfe3a108cc396b1ff98206104714c857e89d3f8 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from decimal import Decimal
from test_framework.blocktools import (
create_confirmed_utxos,
send_big_transactions,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
class MempoolLimitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-acceptnonstdtxn=1",
"-maxmempool=5",
"-spendzeroconfchange=0",
]]
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        self.log.info('Check that mempoolminfee is minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()[
'minrelaytxfee'], Decimal('0.001000'))
assert_equal(self.nodes[0].getmempoolinfo()[
'mempoolminfee'], Decimal('0.001000'))
txids = []
utxo_groups = 4
utxos = create_confirmed_utxos(self.nodes[0], 1 + 30 * utxo_groups)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.01}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
# specifically fund this tx with low fee
self.nodes[0].settxfee(relayfee)
txF = self.nodes[0].fundrawtransaction(tx)
# return to automatic fee selection
self.nodes[0].settxfee(0)
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
for i in range(utxo_groups):
txids.append([])
txids[i] = send_big_transactions(
self.nodes[0], utxos[30 * i:30 * i + 30], 30, 10 * (i + 1))
self.log.info('The tx should be evicted by now')
assert txid not in self.nodes[0].getrawmempool()
txdata = self.nodes[0].gettransaction(txid)
# confirmation should still be 0
assert txdata['confirmations'] == 0
        self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()[
'minrelaytxfee'], Decimal('0.001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()[
'mempoolminfee'], Decimal('0.001000'))
self.log.info('Create a mempool tx that will not pass mempoolminfee')
us0 = utxos.pop()
inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.01}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
# specifically fund this tx with a fee < mempoolminfee, >= than
# minrelaytxfee
txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
assert_raises_rpc_error(-26, "mempool min fee not met",
self.nodes[0].sendrawtransaction, txFS['hex'])
if __name__ == '__main__':
MempoolLimitTest().main()
|
py | 7dfe3a7d7009f7c7a9ffb59d867ef35cd66710e7 | from __future__ import print_function
from __future__ import division
from tigre.utilities.parkerweight import parkerweight
import numpy as np
import warnings
#TODO: Fix parker
def filtering(proj,geo,angles,parker,verbose=False):
if parker:
proj=parkerweight(proj.transpose(0,2,1),geo,angles,parker).transpose(0,2,1)
# proj=parkerweight(proj,geo,angles,parker)
filt_len=max(64,2**nextpow2(2*max(geo.nDetector)))
ramp_kernel=ramp_flat(filt_len)
d=1
filt=filter(geo.filter,ramp_kernel[0],filt_len,d,verbose=verbose)
filt=np.kron(np.ones((np.int64(geo.nDetector[0]),1)),filt)
for i in range(angles.shape[0]):
fproj=np.zeros((geo.nDetector[0],filt_len),dtype=np.float32)
fproj[:,int(filt_len/2-geo.nDetector[1]/2):int(filt_len/2+geo.nDetector[1]/2)]=proj[i]
fproj=np.fft.fft(fproj,axis=1)
fproj=fproj*filt
fproj=np.real(np.fft.ifft(fproj,axis=1))
        proj[i] = (fproj[:, int(filt_len/2 - geo.nDetector[1]/2):int(filt_len/2 + geo.nDetector[1]/2)]
                   / 2 / geo.dDetector[0]
                   * (2 * np.pi / len(angles)) / 2
                   * (geo.DSD[0] / geo.DSO[0]))
return proj
def ramp_flat(n,verbose=False):
nn=np.arange(-n/2,n/2)
h=np.zeros(nn.shape,dtype=np.float32)
h[int(n/2)]=1/4
odd=nn%2==1
h[odd]=-1/(np.pi*nn[odd])**2
return h, nn
def filter(filter,kernel,order,d,verbose=False):
f_kernel=abs(np.fft.fft(kernel))*2
filt=f_kernel[:int((order/2)+1)]
w=2*np.pi*np.arange(len(filt))/order
if filter not in ['ram_lak','shepp_logan','cosine','hamming','hann',None]:
raise ValueError('filter not recognised: '+str(filter))
if filter in {'ram_lak', None}:
if filter is None:
if verbose:
warnings.warn('no filter selected, using default ram_lak')
pass
if filter=='shepp_logan':
filt[1:]*=(np.sin(
w[1:]/(2*d))/(w[1:]/(2*d)
)
)
if filter=='cosine':
filt[1:]*=np.cos(w[1:]/(2*d))
if filter=='hamming':
filt[1:]*=(.54+.46*np.cos(w[1:]/d))
if filter =='hann':
        filt[1:]*=(1+np.cos(w[1:]/d))/2
filt[w>np.pi*d]=0
filt=np.hstack((filt,filt[1:-1][::-1]))
return filt
def nextpow2(n):
i=1
while (2**i)<n:
i+=1
return i
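# Illustrative check (added, not part of the module): how the helpers above combine for a
# detector with 512 channels. The numbers are just an example; no geometry object is needed.
def _example_filter_build():
    n_det = 512
    filt_len = max(64, 2 ** nextpow2(2 * n_det))  # -> 1024, the next power of two >= 2*512
    kernel, _ = ramp_flat(filt_len)
    # frequency-domain ramp apodised with a Hann window, as built by filter(..., 'hann', ...)
    return filter('hann', kernel, filt_len, d=1, verbose=False)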
|
py | 7dfe3a90173055e9aa9ef9c73305767b34a634d0 | from django.contrib import admin
from licenses.models import LegalCode, License
@admin.register(LegalCode)
class LegalCodeAdmin(admin.ModelAdmin):
fields = [
"license",
"language_code",
]
list_display = [
"language_code",
"license",
]
list_filter = [
"language_code",
]
raw_id_fields = [
"license",
]
class LegalCodeInline(admin.TabularInline):
model = LegalCode
list_display = [
"url",
"language_code",
"license",
]
@admin.register(License)
class LicenseAdmin(admin.ModelAdmin):
fields = [
"about",
"license_code",
"version",
"creator_url",
"license_class_url",
"jurisdiction_code",
"source",
"is_replaced_by",
"is_based_on",
"deprecated_on",
"permits_derivative_works",
"permits_reproduction",
"permits_distribution",
"permits_sharing",
"requires_share_alike",
"requires_notice",
"requires_attribution",
"requires_source_code",
"prohibits_commercial_use",
"prohibits_high_income_nation_use",
]
inlines = [LegalCodeInline]
list_filter = [
"license_code",
"version",
"creator_url",
"license_class_url",
"jurisdiction_code",
]
raw_id_fields = [
"source",
"is_replaced_by",
"is_based_on",
]
search_fields = [
"license_code",
"version",
"about",
]
|
py | 7dfe3b36354010c06c94559fb423c7d54a7a26d9 | from django.contrib.auth.base_user import AbstractBaseUser,BaseUserManager
from django.utils.translation import ugettext_lazy as _
class CustomerUserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
if not email:
raise ValueError(_('You have to set an email'))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
        user.save()
        return user
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff = True. '))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_('Superuser must have is_superuser = True.'))
return self.create_user(email, password, **extra_fields) |
py | 7dfe3bef15342c1ee2cb6776c89703f30734c03d | import pytest
from django.core.management import call_command
from logs.models import ImportBatch, AccessLog, Metric
from organizations.tests.conftest import organization_random # noqa - fixture
from publications.models import Title
@pytest.mark.django_db
class TestRemoveUnusedTitles:
@pytest.mark.parametrize(['do_it'], [(False,), (True,)])
def test_command(self, titles, organization_random, platform, interest_rt, do_it):
ib = ImportBatch.objects.create(
organization=organization_random, platform=platform, report_type=interest_rt
)
metric = Metric.objects.create(short_name='m1', name='Metric 1')
title1, title2 = titles
AccessLog.objects.create(
import_batch=ib,
organization=organization_random,
platform=platform,
report_type=interest_rt,
date='2020-01-01',
target=title1,
metric=metric,
value=3,
)
assert Title.objects.count() == 2
args = ['--do-it'] if do_it else []
call_command('remove_unused_titles', *args)
if do_it:
assert Title.objects.count() == 1, 'title2 is deleted as it has no usage'
assert Title.objects.get().pk == title1.pk
else:
            assert Title.objects.count() == 2, 'no titles are deleted'
|
py | 7dfe3bf1e195f27009688d6819cbae974c66a2bb | # Copyright 2018 Blink Health LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
from config_yourself.exceptions import InvalidConfig, ConfigException
from config_yourself.provider.datakey import (
DataKeyService,
DataKeyDecryptError,
decrypt_key_with_password,
)
from base64 import b64decode
from binascii import Error
class Service(DataKeyService):
"""A Password CryptoService
Provide the ``password`` secret to :py:class:`~config_yourself.Config` when loading to initialize this provider.
"""
def __init__(self, config, secrets):
try:
key = config.get("key", None)
except AttributeError as e:
raise InvalidConfig(original=e)
if not key or key == "":
msg = (
"crypto.key is empty, "
"remove the crypto property if no encryption is needed"
)
raise InvalidConfig(message=msg)
try:
keyBytes = b64decode(key)
except Error:
raise InvalidConfig(message="Could not read file key as base64")
password = secrets.get("password", None)
if not password or password == "":
msg = (
"No password provided for decryption. "
"Remove the crypto property if no encryption is needed"
)
raise InvalidConfig(message=msg)
try:
# decrypt the file's key with the supplied password
dataKey = decrypt_key_with_password(keyBytes, password)
except DataKeyDecryptError:
raise PasswordDecryptError(key)
DataKeyService.__init__(self, dataKey)
class PasswordDecryptError(ConfigException):
def __init__(self, key):
msg = "Failed to decrypt with key <{}> and a supplied password".format(key)
self.message = msg
super(PasswordDecryptError, self).__init__(msg)
|
py | 7dfe3ca784e44fe8e8d82db247898c8ca6863265 | # Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SequenceMemoryStack class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import logging
import tensorflow as tf
from pagi.utils.tf_utils import tf_build_interpolate_distributions
from pagi.utils.tf_utils import tf_build_cross_entropy
from pagi.utils.tf_utils import tf_init_type_none
from pagi.utils.tf_utils import tf_init_type_normal
from pagi.utils.np_utils import np_uniform
from pagi.components.summary_component import SummaryComponent
from rsm.components.sequence_memory_layer import SequenceMemoryLayer
from rsm.components.predictor_component import PredictorComponent
class SequenceMemoryStack(SummaryComponent):
"""
A stack architecture of sequence memory layers
"""
# Static names
file = 'file'
cache = 'cache'
prediction = 'prediction'
prediction_loss = 'prediction-loss'
ensemble_top_1 = 'ensemble-top-1'
ensemble_distribution = 'ensemble-distribution'
ensemble_loss_sum = 'ensemble-loss-sum'
ensemble_perplexity = 'ensemble-perplexity'
connectivity_ff = 'ff' # Feed-forward hierarchy ONLY.
connectivity_bi = 'bi' # Bidirectional hierarchy
@staticmethod
def default_hparams():
"""Builds an HParam object with default hyperparameters."""
return tf.contrib.training.HParams(
num_layers=1,
prediction_layer=-1, # default is top of stack
connectivity='ff',
# Optimizer - applies to layers and predictor
optimizer='adam',
loss_type='mse',
learning_rate=0.0005,
batch_size=80,
# Cache predictor
cache_decay=0.9,
cache_smart=False,
# Ensemble (model interpolation)
decode_mass=0.0, # Generate a prediction directly from the RSM
file_mass=0.0, # A distribution loaded from external file
cache_mass=0.0, # Cache of older inputs
uniform_mass=0.0,
input_mass=0.0,
layer_mass=1.0, # Default to only use layer
ensemble_norm_eps=1.0e-11, # 0.0001%
mode='predict-input',
#autoencode=False,
# Memory options
memory_summarize_input=False,
memory_summarize_encoding=False,
memory_summarize_decoding=False,
memory_summarize_weights=False,
memory_summarize_freq=False,
memory_training_interval=[0, -1],
# Geometry. A special value of -1 can be used to generate a 1x1 output (non-conv)
filters_field_width=[28],
filters_field_height=[28],
filters_field_stride=[28],
pool_size=[1], # Per layer. 1 = no pooling
cols=[160],
cells_per_col=[3], # 480 = 160 columns * 3 cells
# Predictor
predictor_training_interval=[0, -1],
predictor_hidden_size=[200],
predictor_nonlinearity=['leaky-relu', 'leaky-relu'],
predictor_optimize='accuracy', # reconstruction, accuracy
predictor_loss_type='cross-entropy',
predictor_input_norm_first=True,
predictor_input_decay_rate=0.0,
predictor_keep_rate=1.0,
predictor_init_type=tf_init_type_normal,
predictor_init_type_bias=tf_init_type_normal,
predictor_init_sd=0.03,
predictor_l2=0.0,
predictor_label_smoothing=0.0,
# Memory predictor options
#predictor_integrate_input=False, deprecated
predictor_norm_type='sum',
predictor_norm_eps=1.0e-11,
# Regularization, 0=Off
f_l2=[0.0],
r_l2=[0.0],
b_l2=[0.0],
d_l2=[0.0],
f_init_type=[tf_init_type_none],
r_init_type=[tf_init_type_none],
b_init_type=[tf_init_type_none],
d_init_type=[tf_init_type_none],
f_bias_init_type=[tf_init_type_none],
r_bias_init_type=[tf_init_type_none],
b_bias_init_type=[tf_init_type_none],
d_bias_init_type=[tf_init_type_none],
f_init_sd=[0.0],
r_init_sd=[0.0],
b_init_sd=[0.0],
d_init_sd=[0.0],
f_bias=[False],
r_bias=[False],
b_bias=[False],
d_bias=[True],
# Control statistics
freq_update_interval=10,
freq_learning_rate=0.1,
freq_min=0.05, # used by lifetime sparsity mask
input_norm_first=False, # Controls order of input ops in memory
hidden_nonlinearity='tanh', # used for hidden layer only
decode_nonlinearity=['none'], # Used for decoding
decode_mode=['fc'],
boost_factor=[0.0], # Enables boost control if nonzero, replaces inhibition
boost_factor_decay=[0.0],
boost_factor_update_interval=[0], # num training batches between boost factor updates
inhibition_decay=[0.1], # controls refractory period
inhibition_with_mask=True,
hidden_keep_rate=[1.0], # Optional dropout on hidden layer
f_keep_rate=[1.0], # Optional dropout
f_decay_rate=[0.0], # Optional integrated/exp decay
f_decay_floor=[0.0], # if > 0, then clip to zero at this level
f_norm_type=[None], # Option to normalize
f_norm_eps=[1.0e-11], # Prevents norm /0
f_decay_trainable=[False],
f_decay_rate_max=[0.95], # If trainable, then this is the max decay rate
rb_keep_rate=[1.0], # Optional dropout on feedback
rb_decay_rate=[0.0], # Optional integrated/exp decay feedback
rb_decay_floor=[0.0], # if > 0, then clip to zero at this level
rb_norm_type=['sum'], # Option to normalize feedback
rb_norm_eps=[1.0e-11], # Prevents feedback norm /0
rb_decay_trainable=[False],
rb_decay_rate_max=[0.95], # If trainable, then this is the max decay rate
# Sparse parameters:
sparsity=[25],
lifetime_sparsity_dends=False,
lifetime_sparsity_cols=False,
summarize_distributions=False
)
def update_statistics(self, batch_type, session): # pylint: disable=W0613
"""Called after a batch"""
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.update_statistics(batch_type, session)
def forget_history(self, session, history_forgetting_probability, clear_previous=False):
"""Called before a batch. Stochastically forget the recurrent history."""
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.forget_history(session, history_forgetting_probability, clear_previous)
def update_history(self, session, history_mask, clear_previous=True):
"""Called before a batch. The external data defines a break in the data."""
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.update_history(session, history_mask, clear_previous)
def update_recurrent_and_feedback(self):
self.update_recurrent()
self.update_feedback()
def update_feedback(self):
"""If connectivity is bidirectional..."""
layers = self.get_num_layers()
for i in range(layers-1): # e.g. 0,1,2 = 3 layers
upper = i +1
lower = i
logging.debug('Copying feedback from layer %s to layer %s', str(upper), str(lower))
upper_layer = self.get_layer(upper)
lower_layer = self.get_layer(lower)
feedback_values = upper_layer.get_values(SequenceMemoryLayer.encoding)
lower_layer.set_feedback(feedback_values)
def update_recurrent(self):
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.update_recurrent()
def get_num_layers(self):
return self._hparams.num_layers
def get_layers(self):
return self._layers
def get_layer(self, layer=None):
return self._layers[layer]
def get_prediction_layer(self):
if self._hparams.prediction_layer < 0:
layers = self.get_num_layers()
return layers - 1
return self._hparams.prediction_layer
def get_loss(self):
if self._hparams.predictor_optimize == 'accuracy':
return self.get_values(SequenceMemoryStack.ensemble_loss_sum)
return self.get_values(SequenceMemoryStack.prediction_loss)
def build(self, input_values, input_shape, label_values, label_shape, hparams, decoder=None, name='rsm-stack'): # pylint: disable=W0221
"""Initializes the model parameters.
Args:
input_values: Tensor containing input
input_shape: The shape of the input, for display (internal is vectorized)
label_values: Tensor containing label
label_shape: The shape of the label, for display (internal is vectorized)
hparams: The hyperparameters for the model as tf.contrib.training.HParams.
name: A globally unique graph name used as a prefix for all tensors and ops.
encoding_shape: The shape to be used to display encoded (hidden layer) structures
"""
self.name = name
self._hparams = hparams
self._decoder = decoder # optional
self._input_shape = input_shape
self._label_shape = label_shape
self._label_values = label_values
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
self._build_layers(input_values, input_shape, label_values, label_shape)
# Predictor may be asked to generate a target image rather than a classification
predictor_target_values = input_values
predictor_target_shape = input_shape
# Build a predictor based on memory input
if self._hparams.layer_mass > 0.0:
logging.info('Building layer predictor...')
layer_predictor_name = 'layer-p'
layer_predictor_input, layer_predictor_input_shape = self._build_layer_prediction_input()
self._layer_predictor = self._build_predictor(layer_predictor_input, layer_predictor_input_shape, label_values,
label_shape, predictor_target_values, predictor_target_shape,
layer_predictor_name)
else:
logging.info('NOT building layer predictor.')
# Build another prediction from the input (1st order)
if self._hparams.input_mass > 0.0:
logging.info('Building input predictor...')
input_predictor_name = 'input-p'
input_predictor_input, input_predictor_input_shape = self._build_input_prediction_input()
self._input_predictor = self._build_predictor(input_predictor_input, input_predictor_input_shape, label_values,
label_shape, predictor_target_values, predictor_target_shape,
input_predictor_name)
else:
logging.info('NOT building input predictor.')
if self._hparams.cache_mass > 0.0:
self._build_cache_pl(label_values)
else:
logging.info('NOT building cache.')
# Now build an ensemble prediction from these predictions
if self._hparams.predictor_optimize == 'accuracy':
self._build_ensemble_prediction()
else:
prediction_layer_idx = self.get_prediction_layer()
prediction_layer = self._layers[prediction_layer_idx]
prediction_decoding = prediction_layer.get_op(SequenceMemoryLayer.decoding)
prediction_loss = prediction_layer.get_op(SequenceMemoryLayer.loss)
self._dual.set_op(self.prediction, prediction_decoding)
self._dual.set_op(self.prediction_loss, prediction_loss)
def _build_decode_prediction(self):
"""Transform the decoding-prediction into a prediction over the classes"""
prediction_layer_idx = self.get_prediction_layer()
prediction_layer = self._layers[prediction_layer_idx]
decoding = prediction_layer.get_op(SequenceMemoryLayer.decoding) # a prediction
prediction_logits = self._decoder.build(decoding)
return prediction_logits
def _build_ensemble_prediction(self):
"""Builds ensemble prediction."""
logging.info('Building ensemble...')
distributions = []
distribution_mass = []
num_classes = self._label_shape[-1]
if self._hparams.decode_mass > 0.0:
print('decoding...')
decode_distribution = self._build_decode_prediction()
print('one hot', decode_distribution)
# decode_sum = tf.reduce_sum(decode_distribution, axis=1, keepdims=True)# + eps
# decode_norm = decode_distribution / decode_sum
# distributions.append(decode_norm)
distributions.append(decode_distribution)
distribution_mass.append(self._hparams.decode_mass)
if self._hparams.input_mass > 0.0:
input_prediction = self._input_predictor.get_op(PredictorComponent.prediction_softmax)
distributions.append(input_prediction)
distribution_mass.append(self._hparams.input_mass)
if self._hparams.uniform_mass > 0.0:
uniform = np_uniform(num_classes)
distributions.append(uniform)
distribution_mass.append(self._hparams.uniform_mass)
if self._hparams.file_mass > 0.0:
file_pl = self._dual.add(self.file, shape=[self._hparams.batch_size, num_classes], default_value=0.0).add_pl()
file_sum = tf.reduce_sum(file_pl, axis=1, keepdims=True)# + eps
file_norm = file_pl / file_sum
distributions.append(file_norm)
distribution_mass.append(self._hparams.file_mass)
if self._hparams.cache_mass > 0.0:
cache_pl = self._dual.get_pl(self.cache)
cache_sum = tf.reduce_sum(cache_pl, axis=1, keepdims=True) + self._hparams.ensemble_norm_eps
cache_norm = cache_pl / cache_sum
distributions.append(cache_norm) # Use the old cache, not with new label ofc
distribution_mass.append(self._hparams.cache_mass)
if self._hparams.layer_mass > 0.0:
layer_prediction = self._layer_predictor.get_op(PredictorComponent.prediction_softmax) #prediction_softmax_op()
distributions.append(layer_prediction)
distribution_mass.append(self._hparams.layer_mass)
# Build the final distribution, calculate loss
ensemble_prediction = tf_build_interpolate_distributions(distributions, distribution_mass, num_classes)
cross_entropy_loss = tf_build_cross_entropy(self._label_values, ensemble_prediction) # Calculate the loss
cross_entropy_mean = tf.reduce_mean(cross_entropy_loss)
ensemble_perplexity = tf.exp(cross_entropy_mean) # instantaneous perplexity (exaggerated)
ensemble_cross_entropy_sum = tf.reduce_sum(cross_entropy_loss)
ensemble_prediction_max = tf.argmax(ensemble_prediction, 1)
self._dual.set_op(self.ensemble_distribution, ensemble_prediction)
self._dual.set_op(self.ensemble_top_1, ensemble_prediction_max)
self._dual.set_op(self.ensemble_perplexity, ensemble_perplexity)
self._dual.set_op(self.ensemble_loss_sum, ensemble_cross_entropy_sum)
if self._hparams.cache_mass > 0.0:
self._build_cache_op(self._label_values, ensemble_prediction)
def _build_cache_pl(self, labels):
logging.info('Building cache placeholder...')
num_classes = labels.get_shape().as_list()[1]
cache_pl = self._dual.add(self.cache, shape=[self._hparams.batch_size, num_classes], default_value=0.0).add_pl()
return cache_pl
def _build_cache_op(self, labels, prediction):
if self._hparams.cache_smart:
self._build_smart_cache_op(labels, prediction)
else:
self._build_simple_cache_op(labels, prediction)
def _build_simple_cache_op(self, labels, prediction):
"""Builds a simple caching operation."""
del prediction
logging.info('Building simple cache op...')
cache_increase = labels
cache_pl = self._dual.get_pl(self.cache)
cache_decay = cache_pl * self._hparams.cache_decay
cache_op = tf.maximum(cache_decay, cache_increase) # Sets to 1 if label set
self._dual.set_op(self.cache, cache_op)
return cache_pl
def _build_smart_cache_op(self, labels, prediction):
"""
Builds a smart caching operation.
Surprise = "how much mass was predicted before the true label"
If surprise is more, then other values will decay faster due to normalization. We will cache the new value.
If surprise is less, other cache values are retained, not caching the new value.
    Example, for a predicted distribution [0.1, 0.09, 0.05, 0.02]:
      true label is the 4th class (0.02): S = 0.1 + 0.09 + 0.05 = 0.24
      true label is the 3rd class (0.05): S = 0.1 + 0.09 = 0.19
      true label is the 2nd class (0.09): S = 0.1
"""
logging.info('Building smart cache op...')
masked_prediction = labels * prediction # now only a value where label is true (TP). All FP mass is zero.
predicted_mass = tf.reduce_sum(masked_prediction, axis=1, keepdims=True) # The predicted mass of the true label (scalar per batch)
more_likely_bool = tf.greater(prediction, predicted_mass) # bool. Mask if the predicted mass was greater than the true label
more_likely_mask = tf.to_float(more_likely_bool)
predicted_more_likely_mass = prediction * more_likely_mask # Mask out mass that was less than predicted true label mass
surprise = tf.reduce_sum(predicted_more_likely_mass, axis=1, keepdims=True) # now a vector of batch size x 1
cache_increase = labels * surprise # weight the increase by the surprise
cache_pl = self._dual.get_pl(self.cache)
cache_decay = cache_pl * self._hparams.cache_decay
cache_op = tf.maximum(cache_decay, cache_increase) # Smart cache
self._dual.set_op(self.cache, cache_op)
return cache_op
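  # Minimal numpy illustration of the "surprise" term (added comment, not used by the class):
  #   labels     = np.array([[0., 0., 0., 1.]])               # true label is the 4th class
  #   prediction = np.array([[0.1, 0.09, 0.05, 0.02]])
  #   predicted_mass = (labels * prediction).sum(axis=1, keepdims=True)      # 0.02
  #   surprise = (prediction * (prediction > predicted_mass)).sum(axis=1)    # 0.24
  # which mirrors the tf.reduce_sum / tf.greater chain in the method above.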
def _build_layer_prediction_input(self):
prediction_layer_idx = self.get_prediction_layer()
prediction_layer = self._layers[prediction_layer_idx]
prediction_input = prediction_layer.get_op(SequenceMemoryLayer.encoding)
prediction_input_shape = prediction_layer.get_shape(SequenceMemoryLayer.encoding)
return prediction_input, prediction_input_shape
def _build_input_prediction_input(self):
prediction_layer_idx = self.get_prediction_layer()
prediction_layer = self._layers[prediction_layer_idx]
prediction_input = prediction_layer.get_op(SequenceMemoryLayer.previous)
prediction_input_shape = prediction_layer.get_shape(SequenceMemoryLayer.previous)
return prediction_input, prediction_input_shape
def _build_layers(self, input_values, input_shape, label_values, label_shape):
"""Build the RSM layers."""
logging.info('Building layers...')
self._layers = []
layers = self.get_num_layers()
layers_hparams = []
layers_shapes = []
layer_input_values = input_values
layer_input_shape = input_shape
# Explicitly specify batch size
if layer_input_shape[0] < 0:
layer_input_shape[0] = self._hparams.batch_size
# Compute geometry of all layers
for i in range(layers):
layer_hparams = SequenceMemoryLayer.default_hparams()
# copy and override parameters
layer_hparams.optimizer = self._hparams.optimizer
layer_hparams.loss_type = self._hparams.loss_type
layer_hparams.learning_rate = self._hparams.learning_rate
layer_hparams.batch_size = self._hparams.batch_size
layer_hparams.mode = self._hparams.mode
#layer_hparams.autoencode = self._hparams.autoencode
layer_hparams.summarize_input = self._hparams.memory_summarize_input
layer_hparams.summarize_encoding = self._hparams.memory_summarize_encoding
layer_hparams.summarize_decoding = self._hparams.memory_summarize_decoding
layer_hparams.summarize_weights = self._hparams.memory_summarize_weights
layer_hparams.summarize_freq = self._hparams.memory_summarize_freq
layer_hparams.training_interval = self._hparams.memory_training_interval
layer_hparams.input_norm_first = self._hparams.input_norm_first
layer_hparams.hidden_nonlinearity = self._hparams.hidden_nonlinearity
layer_hparams.predictor_use_input = False
layer_hparams.predictor_inc_input = False
# Compute conv geometry
ih = layer_input_shape[1]
iw = layer_input_shape[2]
fh = self._hparams.filters_field_height[i]
fw = self._hparams.filters_field_width[i]
fs = self._hparams.filters_field_stride[i]
if fh < 0:
fh = ih
if fw < 0:
fw = iw
if fs < 0:
fs = max(fh, fw)
layer_hparams.filters_field_height = fh
layer_hparams.filters_field_width = fw
layer_hparams.filters_field_stride = fs
# Depth dimension - num filters
layer_hparams.cols = self._hparams.cols[i]
layer_hparams.cells_per_col = self._hparams.cells_per_col[i]
layer_hparams.freq_update_interval = self._hparams.freq_update_interval
layer_hparams.freq_learning_rate = self._hparams.freq_learning_rate
layer_hparams.freq_min = self._hparams.freq_min
#layer_hparams.predictor_norm_input = self._hparams.predictor_norm_input
#layer_hparams.predictor_integrate_input = self._hparams.predictor_integrate_input
layer_hparams.f_l2 = self._hparams.f_l2[i]
layer_hparams.r_l2 = self._hparams.r_l2[i]
layer_hparams.b_l2 = self._hparams.b_l2[i]
layer_hparams.d_l2 = self._hparams.d_l2[i]
layer_hparams.f_init_type = self._hparams.f_init_type[i]
layer_hparams.r_init_type = self._hparams.r_init_type[i]
layer_hparams.b_init_type = self._hparams.b_init_type[i]
layer_hparams.d_init_type = self._hparams.d_init_type[i]
layer_hparams.f_bias_init_type = self._hparams.f_bias_init_type[i]
layer_hparams.r_bias_init_type = self._hparams.r_bias_init_type[i]
layer_hparams.b_bias_init_type = self._hparams.b_bias_init_type[i]
layer_hparams.d_bias_init_type = self._hparams.d_bias_init_type[i]
layer_hparams.f_init_sd = self._hparams.f_init_sd[i]
layer_hparams.r_init_sd = self._hparams.r_init_sd[i]
layer_hparams.b_init_sd = self._hparams.b_init_sd[i]
layer_hparams.d_init_sd = self._hparams.d_init_sd[i]
layer_hparams.f_bias = self._hparams.f_bias[i]
layer_hparams.r_bias = self._hparams.r_bias[i]
layer_hparams.b_bias = self._hparams.b_bias[i]
layer_hparams.d_bias = self._hparams.d_bias[i]
layer_hparams.decode_mode = self._hparams.decode_mode[i]
layer_hparams.decode_nonlinearity = self._hparams.decode_nonlinearity[i]
layer_hparams.boost_factor = self._hparams.boost_factor[i]
layer_hparams.boost_factor_decay = self._hparams.boost_factor_decay[i]
layer_hparams.boost_factor_update_interval = self._hparams.boost_factor_update_interval[i]
layer_hparams.inhibition_decay = self._hparams.inhibition_decay[i]
layer_hparams.inhibition_with_mask = self._hparams.inhibition_with_mask
layer_hparams.hidden_keep_rate = self._hparams.hidden_keep_rate[i]
layer_hparams.f_keep_rate = self._hparams.f_keep_rate[i]
layer_hparams.f_decay_rate = self._hparams.f_decay_rate[i]
layer_hparams.f_decay_floor = self._hparams.f_decay_floor[i]
layer_hparams.f_norm_type = self._hparams.f_norm_type[i]
layer_hparams.f_norm_eps = self._hparams.f_norm_eps[i]
layer_hparams.f_decay_trainable = self._hparams.f_decay_trainable[i]
layer_hparams.f_decay_rate_max = self._hparams.f_decay_rate_max[i]
layer_hparams.rb_keep_rate = self._hparams.rb_keep_rate[i]
layer_hparams.rb_decay_rate = self._hparams.rb_decay_rate[i]
layer_hparams.rb_decay_floor = self._hparams.rb_decay_floor[i]
layer_hparams.rb_norm_type = self._hparams.rb_norm_type[i]
layer_hparams.rb_norm_eps = self._hparams.rb_norm_eps[i]
layer_hparams.rb_decay_trainable = self._hparams.rb_decay_trainable[i]
layer_hparams.rb_decay_rate_max = self._hparams.rb_decay_rate_max[i]
layer_hparams.sparsity = self._hparams.sparsity[i]
layer_hparams.lifetime_sparsity_dends = self._hparams.lifetime_sparsity_dends
layer_hparams.lifetime_sparsity_cols = self._hparams.lifetime_sparsity_cols
logging.debug('layer: %d h/w/s: %d/%d/%d',
i,
layer_hparams.filters_field_height,
layer_hparams.filters_field_width,
layer_hparams.filters_field_stride)
layer_shape = SequenceMemoryLayer.get_encoding_shape_4d(layer_input_shape, layer_hparams)
layers_hparams.append(layer_hparams)
layers_shapes.append(layer_shape)
layer_input_shape = layer_shape # for next layer
# Max-pooling - affects next layer input shape
pool_size = self._hparams.pool_size[i]
if pool_size > 1:
logging.info('Pooling %s:1', str(pool_size))
# layer_input_shape[1] = int(layer_input_shape[1] / pool_size)
# layer_input_shape[2] = int(layer_input_shape[2] / pool_size)
layer_input_shape[1] = math.ceil(float(layer_input_shape[1]) / float(pool_size))
layer_input_shape[2] = math.ceil(float(layer_input_shape[2]) / float(pool_size))
# 2nd pass - for bi-directional connectivity
layer_input_values = input_values
layer_input_shape = input_shape
for i in range(layers):
layer_hparams = layers_hparams[i] # retrieve precalculated hparams
layer = SequenceMemoryLayer()
layer_name = 'layer-'+str(i+1)
layer_feedback_shape = None # Connectivity FF
if self._hparams.connectivity == SequenceMemoryStack.connectivity_bi:
logging.info('Bidirectional connectivity enabled.')
if i < (layers-1):
layer_feedback_shape = layers_shapes[i+1]
else:
logging.info('Feed-forward connectivity enabled.')
layer.build(layer_input_values, layer_input_shape, layer_hparams, name=layer_name, encoding_shape=None,
feedback_shape=layer_feedback_shape, target_shape=label_shape, target_values=label_values)
self._layers.append(layer)
# link layers
# This means it'll update with the latest state of input in lower layer WRT current input
output_encoding = layer.get_op(SequenceMemoryLayer.encoding) # 4d, protected with StopGradient
layer_input_values = output_encoding
pool_size = self._hparams.pool_size[i]
if pool_size > 1:
logging.info('Pooling %s:1', str(pool_size))
pool_sizes = [1, pool_size, pool_size, 1]
pool_strides = [1, pool_size, pool_size, 1]
layer_input_values = tf.nn.max_pool(output_encoding, pool_sizes, pool_strides, padding='SAME')
#print( "output encoding, ", output_encoding)
layer_input_shape = layer_input_values.shape.as_list()
def _build_predictor(self, prediction_input, prediction_input_shape, label_values, label_shape, target_values,
target_shape, name='p'):
"""Build the predictor using outputs from RSM layers."""
# Build the predictor
predictor = PredictorComponent()
predictor_hparams = predictor.default_hparams()
predictor_hparams.optimizer = self._hparams.optimizer
predictor_hparams.optimize = self._hparams.predictor_optimize
predictor_hparams.loss_type = self._hparams.predictor_loss_type
predictor_hparams.learning_rate = self._hparams.learning_rate
predictor_hparams.batch_size = self._hparams.batch_size
predictor_hparams.training_interval = self._hparams.predictor_training_interval
predictor_hparams.nonlinearity = self._hparams.predictor_nonlinearity
predictor_hparams.hidden_size = self._hparams.predictor_hidden_size
predictor_hparams.norm_type = self._hparams.predictor_norm_type
predictor_hparams.norm_eps = self._hparams.predictor_norm_eps
predictor_hparams.input_norm_first = self._hparams.predictor_input_norm_first
predictor_hparams.input_decay_rate = self._hparams.predictor_input_decay_rate
predictor_hparams.keep_rate = self._hparams.predictor_keep_rate
predictor_hparams.init_type = self._hparams.predictor_init_type
predictor_hparams.init_type_bias = self._hparams.predictor_init_type_bias
predictor_hparams.init_sd = self._hparams.predictor_init_sd
predictor_hparams.l2 = self._hparams.predictor_l2
predictor_hparams.label_smoothing = self._hparams.predictor_label_smoothing
predictor.build(prediction_input, prediction_input_shape, label_values, label_shape, target_values, target_shape,
predictor_hparams, name=name)
return predictor
# BATCH INTERFACE ------------------------------------------------------------------
def update_feed_dict(self, feed_dict, batch_type='training'):
"""Updates the feed dict in each layer."""
if self._hparams.decode_mass > 0.0:
self._decoder.update_feed_dict(feed_dict, batch_type)
# Layers
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.update_feed_dict(feed_dict, batch_type)
# Predictor
if self._hparams.layer_mass > 0.0:
self._layer_predictor.update_feed_dict(feed_dict, batch_type)
if self._hparams.input_mass > 0.0:
self._input_predictor.update_feed_dict(feed_dict, batch_type)
if self._hparams.file_mass > 0.0:
file_dual = self._dual.get(self.file)
file_pl = file_dual.get_pl()
file_values = file_dual.get_values() # assume this is populated somehow by workflow.
feed_dict.update({
file_pl: file_values
})
# Cache
if self._hparams.cache_mass > 0.0:
cache = self._dual.get(SequenceMemoryStack.cache)
cache_pl = cache.get_pl()
cache_values = cache.get_values()
feed_dict.update({
cache_pl: cache_values
})
def add_fetches(self, fetches, batch_type='training'):
"""Add fetches in each layer for session run call."""
# Layers
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.add_fetches(fetches, batch_type)
# Predictors
if self._hparams.layer_mass > 0.0:
self._layer_predictor.add_fetches(fetches, batch_type)
if self._hparams.input_mass > 0.0:
self._input_predictor.add_fetches(fetches, batch_type)
# Cache
fetches[self.name] = {}
if self._hparams.cache_mass > 0.0:
fetches[self.name].update({
SequenceMemoryStack.cache: self._dual.get_op(SequenceMemoryStack.cache),
})
# Ensemble
if self._hparams.predictor_optimize == 'accuracy':
fetches[self.name].update({
self.ensemble_distribution: self._dual.get_op(self.ensemble_distribution),
self.ensemble_top_1: self._dual.get_op(self.ensemble_top_1),
self.ensemble_perplexity: self._dual.get_op(self.ensemble_perplexity),
self.ensemble_loss_sum: self._dual.get_op(self.ensemble_loss_sum)
})
else:
fetches[self.name].update({
self.prediction: self._dual.get_op(self.prediction),
self.prediction_loss: self._dual.get_op(self.prediction_loss)
})
# Summaries
super().add_fetches(fetches, batch_type)
def set_fetches(self, fetched, batch_type='training'):
"""Set fetches in each layer."""
# Layers
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.set_fetches(fetched, batch_type)
# Predictors
if self._hparams.layer_mass > 0.0:
self._layer_predictor.set_fetches(fetched, batch_type)
if self._hparams.input_mass > 0.0:
self._input_predictor.set_fetches(fetched, batch_type)
names = []
# Cache
if self._hparams.cache_mass > 0.0:
names.append(SequenceMemoryStack.cache)
# Ensemble
if self._hparams.predictor_optimize == 'accuracy':
names.append(self.ensemble_distribution)
names.append(self.ensemble_top_1)
names.append(self.ensemble_perplexity)
names.append(self.ensemble_loss_sum)
else:
names.append(self.prediction)
names.append(self.prediction_loss)
self._dual.set_fetches(fetched, names)
# Summaries
super().set_fetches(fetched, batch_type)
def write_summaries(self, step, writer, batch_type='training'):
"""Write the TensorBoard summaries for each layer."""
# Layers
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.write_summaries(step, writer, batch_type)
# Predictors
if self._hparams.layer_mass > 0.0:
self._layer_predictor.write_summaries(step, writer, batch_type)
if self._hparams.input_mass > 0.0:
self._input_predictor.write_summaries(step, writer, batch_type)
# Summaries
super().write_summaries(step, writer, batch_type)
def build_summaries(self, batch_types=None, max_outputs=3, scope=None):
"""Builds the summaries for each layer."""
# Layers
layers = self.get_num_layers()
for i in range(layers):
layer = self.get_layer(i)
layer.build_summaries(batch_types)
# Predictors
if self._hparams.layer_mass > 0.0:
self._layer_predictor.build_summaries(batch_types)
if self._hparams.input_mass > 0.0:
self._input_predictor.build_summaries(batch_types)
# Summaries
super().build_summaries(batch_types, max_outputs, scope)
def _build_summaries(self, batch_type, max_outputs=3):
"""Build summaries."""
del batch_type
# Ensemble interpolation
summaries = []
if self._hparams.summarize_distributions:
ensemble_perplexity = self._dual.get_op(self.ensemble_perplexity)
ensemble_cross_entropy_sum = self._dual.get_op(self.ensemble_loss_sum)
#ensemble_top_1 = self._dual.get_op(self.ensemble_top_1)
ensemble_distribution = self._dual.get_op(self.ensemble_distribution)
ensemble_distribution_sum = tf.reduce_sum(ensemble_distribution)
summaries.append(tf.summary.scalar('mean_perplexity', tf.reduce_mean(ensemble_perplexity)))
summaries.append(tf.summary.scalar(self.ensemble_loss_sum, ensemble_cross_entropy_sum))
summaries.append(tf.summary.scalar('distribution_sum', ensemble_distribution_sum))
#summaries.append(tf.summary.scalar(self.ensemble_top_1, ensemble_top_1))
#ensemble_distribution = tf.Print(ensemble_distribution, [ensemble_distribution], 'DIST ', summarize=48)
ensemble_shape = ensemble_distribution.get_shape().as_list()
ensemble_shape_4d = [ensemble_shape[0], 1, ensemble_shape[1], 1]
#print('>>>>>', ensemble_shape_4d)
ensemble_distribution_reshape = tf.reshape(ensemble_distribution, ensemble_shape_4d)
p_summary_op = tf.summary.image(self.ensemble_distribution, ensemble_distribution_reshape,
max_outputs=max_outputs)
summaries.append(p_summary_op)
if not summaries:
return None
return summaries
|
py | 7dfe3cde380b5e2d3fa79d2e934758340d642685 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .virtual_machine_image_resource import VirtualMachineImageResource
class VirtualMachineImage(VirtualMachineImageResource):
"""Describes a Virtual Machine Image.
:param id: Resource Id
:type id: str
:param name: The name of the resource.
:type name: str
:param location: The supported Azure location of the resource.
:type location: str
:param tags: The tags attached to the resource.
:type tags: dict
:param plan:
:type plan: :class:`PurchasePlan
<azure.mgmt.compute.v2016_04_30_preview.models.PurchasePlan>`
:param os_disk_image:
:type os_disk_image: :class:`OSDiskImage
<azure.mgmt.compute.v2016_04_30_preview.models.OSDiskImage>`
:param data_disk_images:
:type data_disk_images: list of :class:`DataDiskImage
<azure.mgmt.compute.v2016_04_30_preview.models.DataDiskImage>`
"""
_validation = {
'name': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'properties.plan', 'type': 'PurchasePlan'},
'os_disk_image': {'key': 'properties.osDiskImage', 'type': 'OSDiskImage'},
'data_disk_images': {'key': 'properties.dataDiskImages', 'type': '[DataDiskImage]'},
}
def __init__(self, name, location, id=None, tags=None, plan=None, os_disk_image=None, data_disk_images=None):
super(VirtualMachineImage, self).__init__(id=id, name=name, location=location, tags=tags)
self.plan = plan
self.os_disk_image = os_disk_image
self.data_disk_images = data_disk_images
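# --- Illustrative usage (assumed example; not part of the generated SDK file) --
# A minimal sketch of constructing the model above. The image name, region and
# tag values are hypothetical.
if __name__ == '__main__':
    example_image = VirtualMachineImage(
        name='16.04-LTS',       # example image name (assumption)
        location='westus',      # example Azure region (assumption)
        tags={'team': 'infra'},
    )
    print(example_image.name, example_image.location, example_image.tags)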
|
py | 7dfe3db932c4fecb4e9bd257cf8764c57e6df96c | from click.testing import CliRunner
from eslog.release import __version__
from eslog.main import cli
runner = CliRunner()
def test_version():
assert __version__ == "0.0.3"
def test_list_help():
result = runner.invoke(cli, ["list", "--help"])
assert result.exit_code == 0
assert "List all indexes" in result.output
def test_logs_help():
result = runner.invoke(cli, ["logs", "--help"])
assert result.exit_code == 0
assert " Get and archive logs" in result.output
|
py | 7dfe3e52841d938e272258e6aa8101d09e4e5908 | import asyncio
import typing
from io import BytesIO
import aiohttp
import discord
from discord.ext import commands
from yarl import URL
class GuildManagement(
commands.Cog,
name="Server Management",
description="Module for managing everyday server events",
):
def __init__(self, bot):
self.bot = bot
self.emoji = 836843090870272020
@commands.Cog.listener()
async def on_guild_join(self, guild):
async with self.bot.db.acquire() as conn:
async with conn.transaction():
await conn.execute("INSERT INTO guilds(gid) VALUES($1)", guild.id)
await self.bot.recache_guild(guild.id)
@commands.has_permissions(administrator=True)
@commands.command()
async def poll(self, ctx, description: str, choice1: str, choice2: str):
""" """
description = f"{description} \n \n \U0001f170 {choice1} \n \U0001f171 {choice2}"
poll_embed = self.bot.embed(
title="Poll",
color=0x77FC03,
description="".join(description),
colorful=False,
)
poll_embed.set_footer(text=f"Poll started by {ctx.author.name}")
msg = await ctx.send(embed=poll_embed)
await msg.add_reaction("\U0001f170")
await msg.add_reaction("\U0001f171")
@commands.command()
async def bots(self, ctx):
description = ""
for member in ctx.guild.members:
if member.bot:
description += f"{member.mention} joined at {str(member.joined_at.strftime('%Y:%m:%d'))} \n"
embed = self.bot.embed(title=f"Bots in {ctx.guild.name}", description=description)
await ctx.send(embed=embed)
@commands.has_permissions(administrator=True)
@commands.group(invoke_without_command=True)
async def emoji(self, ctx: commands.Context):
if not ctx.invoked_subcommand:
await ctx.send_help("emoji")
@emoji.command()
async def add(
self,
ctx: commands.Context,
name: str,
stolen_emoji: typing.Optional[discord.PartialEmoji],
url: typing.Optional[str],
roles: commands.Greedy[discord.Role],
):
sess = aiohttp.ClientSession()
async def add_emoji(url, name):
async with sess.get(url) as resp:
emoji = await ctx.guild.create_custom_emoji(name=name, image=await resp.read(), roles=roles)
return await ctx.reply(f"{emoji} has been added")
# if not URL(url).scheme:
# return await ctx.reply("The provided url is not valid")
if url:
await add_emoji(url, name)
elif ctx.message.attachments:
await add_emoji(ctx.message.attachments[0].url, name)
elif stolen_emoji:
await add_emoji(stolen_emoji.url, name)
else:
await ctx.reply("No attachment or url provided!!")
return await sess.close()
@emoji.command()
async def delete(self, ctx, emojis: commands.Greedy[discord.Emoji]):
def checkM(msg):
return ctx.author == msg.author and msg.channel == ctx.channel
await ctx.reply("This command will delete emojis permanently. Reply `Y` if you still want to continue")
consent = await self.bot.wait_for("message", check=checkM)
if consent.content not in ("Y", "y"):
try:
return await ctx.reply("Deletion Aborted")
except commands.MessageNotFound:
return await ctx.send("Deletion Aborted")
status = await asyncio.gather(*[emoji.delete() for emoji in emojis], return_exceptions=True)
await ctx.reply(f"Successfully deleted {len(emojis)} emojis")
def setup(bot):
bot.add_cog(GuildManagement(bot))
|
py | 7dfe3e8d2620acb58075a251c2eb5440274a0aaf | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for making lists of tests, and an AJAX endpoint to list tests.
This module contains functions for listing:
- Sub-tests for a given test suite (in a tree structure).
- Tests which match a given test path pattern.
"""
import json
from google.appengine.ext import ndb
from dashboard import layered_cache
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import graph_data
class ListTestsHandler(request_handler.RequestHandler):
"""URL endpoint for AJAX requests to list masters, bots, and tests."""
def post(self):
"""Outputs a JSON string of the requested list.
Request parameters:
type: Type of list to make, one of "suite", "sub_tests" or "pattern".
suite: Test suite name (applies only if type is "sub_tests").
bots: Comma-separated bots name (applies only if type is "sub_tests").
p: Test path pattern (applies only if type is "pattern").
has_rows: "1" if the requester wants to list only list tests that
have points (applies only if type is "pattern").
Outputs:
A data structure with test names in JSON format, or nothing.
"""
self.response.headers.add_header('Access-Control-Allow-Origin', '*')
list_type = self.request.get('type')
# TODO(qyearsley): Separate these into two different handlers.
if list_type == 'sub_tests':
suite_name = self.request.get('suite')
bot_names = self.request.get('bots').split(',')
test_list = GetSubTests(suite_name, bot_names)
self.response.out.write(json.dumps(test_list))
if list_type == 'pattern':
pattern = self.request.get('p')
only_with_rows = self.request.get('has_rows') == '1'
test_list = GetTestsMatchingPattern(
pattern, only_with_rows=only_with_rows)
self.response.out.write(json.dumps(test_list))
def GetSubTests(suite_name, bot_names):
"""Gets the entire tree of subtests for the suite with the given name.
Each bot may have different sub-tests available, but there is one combined
sub-tests dict returned for all the bots specified.
This method is used by the test-picker select menus to display what tests
are available; only tests that are not deprecated should be listed.
Args:
suite_name: Top level test name.
bot_names: List of master/bot names in the form "<master>/<platform>".
Returns:
A dict mapping test names to dicts to entries which have the keys
"has_rows" (boolean) and "sub_tests", which is another sub-tests dict.
This forms a tree structure.
"""
# For some bots, there may be cached data; First collect and combine this.
combined = {}
for bot_name in bot_names:
master, bot = bot_name.split('/')
suite_key = ndb.Key('TestMetadata', '%s/%s/%s' % (master, bot, suite_name))
cached = layered_cache.Get(_ListSubTestCacheKey(suite_key))
if cached:
combined = _MergeSubTestsDict(combined, cached)
else:
sub_test_paths = _FetchSubTestPaths(suite_key, False)
deprecated_sub_test_paths = _FetchSubTestPaths(suite_key, True)
sub_tests = _MergeSubTestsDict(
_SubTestsDict(sub_test_paths, False),
_SubTestsDict(deprecated_sub_test_paths, True))
layered_cache.Set(_ListSubTestCacheKey(suite_key), sub_tests)
combined = _MergeSubTestsDict(combined, sub_tests)
return combined
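# --- Illustrative return value (hypothetical data; not from a real dashboard) --
# GetSubTests returns a nested dict like the one below: each entry carries
# "has_rows" and a nested "sub_tests" dict, plus "deprecated": True when every
# matching test is deprecated. The chart names here are made up.
_EXAMPLE_SUB_TESTS = {
    'Total': {
        'has_rows': True,
        'sub_tests': {
            'ref': {'has_rows': True, 'sub_tests': {}},
        },
    },
    'warm_times': {
        'has_rows': True,
        'sub_tests': {},
        'deprecated': True,
    },
}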
def _FetchSubTestPaths(test_key, deprecated):
"""Makes a list of partial test paths for descendants of a test suite.
Args:
test_key: A ndb.Key object for a TestMetadata entity.
deprecated: Whether or not to fetch deprecated tests.
Returns:
A list of test paths for all descendant TestMetadata entities that have
associated Row entities. These test paths omit the Master/bot/suite part.
"""
keys = GetTestDescendants(test_key, has_rows=True, deprecated=deprecated)
return map(_SubTestPath, keys)
def _SubTestPath(test_key):
"""Returns the part of a test path starting from after the test suite."""
full_test_path = utils.TestPath(test_key)
parts = full_test_path.split('/')
assert len(parts) > 3
return '/'.join(parts[3:])
def _SubTestsDict(paths, deprecated):
"""Constructs a sub-test dict from a list of test paths.
Args:
paths: An iterable of test paths for which there are points. Each test
path is of the form "Master/bot/benchmark/chart/...". Each test path
corresponds to a TestMetadata entity for which has_rows is set to True.
deprecated: Whether tests are deprecated.
Returns:
A recursively nested dict of sub-tests, as returned by GetSubTests.
"""
sub_tests = {}
top_level = set(p.split('/')[0] for p in paths if p)
for name in top_level:
sub_test_paths = _SubPaths(paths, name)
has_rows = name in paths
sub_tests[name] = _SubTestsDictEntry(sub_test_paths, has_rows, deprecated)
return sub_tests
def _SubPaths(paths, first_part):
"""Returns paths of sub-tests that start with some name."""
assert first_part
return ['/'.join(p.split('/')[1:]) for p in paths
if '/' in p and p.split('/')[0] == first_part]
def _SubTestsDictEntry(sub_test_paths, has_rows, deprecated):
"""Recursively gets an entry in a sub-tests dict."""
entry = {
'has_rows': has_rows,
'sub_tests': _SubTestsDict(sub_test_paths, deprecated)
}
if deprecated:
entry['deprecated'] = True
return entry
def _ListSubTestCacheKey(test_key):
"""Returns the sub-tests list cache key for a test suite."""
parts = utils.TestPath(test_key).split('/')
master, bot, suite = parts[0:3]
return graph_data.LIST_TESTS_SUBTEST_CACHE_KEY % (master, bot, suite)
def _MergeSubTestsDict(a, b):
"""Merges two sub-tests dicts together."""
sub_tests = {}
a_names, b_names = set(a), set(b)
for name in a_names & b_names:
sub_tests[name] = _MergeSubTestsDictEntry(a[name], b[name])
for name in a_names - b_names:
sub_tests[name] = a[name]
for name in b_names - a_names:
sub_tests[name] = b[name]
return sub_tests
def _MergeSubTestsDictEntry(a, b):
"""Merges two corresponding sub-tests dict entries together."""
assert a and b
deprecated = a.get('deprecated', False) and b.get('deprecated', False)
entry = {
'has_rows': a['has_rows'] or b['has_rows'],
'sub_tests': _MergeSubTestsDict(a['sub_tests'], b['sub_tests'])
}
if deprecated:
entry['deprecated'] = True
return entry
def GetTestsMatchingPattern(pattern, only_with_rows=False, list_entities=False):
"""Gets the TestMetadata entities or keys which match |pattern|.
For this function, it's assumed that a test path should only have up to seven
parts. In theory, tests can be arbitrarily nested, but in practice, tests
are usually structured as master/bot/suite/graph/trace, and only a few have
seven parts.
Args:
pattern: /-separated string of '*' wildcard and TestMetadata string_ids.
only_with_rows: If True, only return TestMetadata entities which have data
points.
list_entities: If True, return entities. If false, return keys (faster).
Returns:
A list of test paths, or test entities if list_entities is True.
"""
property_names = [
'master_name', 'bot_name', 'suite_name', 'test_part1_name',
'test_part2_name', 'test_part3_name', 'test_part4_name',
'test_part5_name']
pattern_parts = pattern.split('/')
if len(pattern_parts) > 8:
return []
# Below, we first build a list of (property_name, value) pairs to filter on.
query_filters = []
for index, part in enumerate(pattern_parts):
if '*' not in part:
query_filters.append((property_names[index], part))
for index in range(len(pattern_parts), 7):
# Tests longer than the desired pattern will have non-empty property names,
# so they can be filtered out by matching against an empty string.
# Bug: 'test_part5_name' was added recently, and TestMetadata entities which
# were created before then do not match it. Since it is the last part, and
# rarely used, it's okay not to test for it. See
# https://github.com/catapult-project/catapult/issues/2885
query_filters.append((property_names[index], ''))
# Query tests based on the above filters. Pattern parts with * won't be
# filtered here; the set of tests queried is a superset of the matching tests.
query = graph_data.TestMetadata.query()
for f in query_filters:
query = query.filter(
# pylint: disable=protected-access
graph_data.TestMetadata._properties[f[0]] == f[1])
query = query.order(graph_data.TestMetadata.key)
if only_with_rows:
query = query.filter(
graph_data.TestMetadata.has_rows == True)
test_keys = query.fetch(keys_only=True)
# Filter to include only tests that match the pattern.
test_keys = [k for k in test_keys if utils.TestMatchesPattern(k, pattern)]
if list_entities:
return ndb.get_multi(test_keys)
return [utils.TestPath(k) for k in test_keys]
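# --- Illustrative sketch (standalone; assumed example pattern) ------------------
# Mirrors the filter construction in GetTestsMatchingPattern for a hypothetical
# pattern, without touching ndb. The helper name is an illustration only.
def _example_pattern_filters(pattern='ChromiumPerf/*/sunspider/Total'):
    property_names = [
        'master_name', 'bot_name', 'suite_name', 'test_part1_name',
        'test_part2_name', 'test_part3_name', 'test_part4_name',
        'test_part5_name']
    parts = pattern.split('/')
    # Non-wildcard parts become equality filters; '*' parts are left to the
    # final utils.TestMatchesPattern check on the queried keys.
    filters = [(property_names[i], p) for i, p in enumerate(parts) if '*' not in p]
    # Longer test paths are excluded by matching the remaining names against ''.
    filters += [(property_names[i], '') for i in range(len(parts), 7)]
    # -> [('master_name', 'ChromiumPerf'), ('suite_name', 'sunspider'),
    #     ('test_part1_name', 'Total'), ('test_part2_name', ''),
    #     ('test_part3_name', ''), ('test_part4_name', '')]
    return filters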
def GetTestDescendants(
test_key, has_rows=None, deprecated=None, keys_only=True):
"""Returns all the tests which are subtests of the test with the given key.
Args:
test_key: The key of the TestMetadata entity to get descendants of.
has_rows: If set, filter the query for this value of has_rows.
deprecated: If set, filter the query for this value of deprecated.
Returns:
A list of keys of all descendants of the given test.
"""
test_parts = utils.TestPath(test_key).split('/')
query_parts = [
('master_name', test_parts[0]),
('bot_name', test_parts[1]),
('suite_name', test_parts[2]),
]
for index, part in enumerate(test_parts[3:]):
query_parts.append(('test_part%d_name' % (index + 1), part))
query = graph_data.TestMetadata.query()
for part in query_parts:
query = query.filter(ndb.GenericProperty(part[0]) == part[1])
if has_rows is not None:
query = query.filter(graph_data.TestMetadata.has_rows == has_rows)
if deprecated is not None:
query = query.filter(graph_data.TestMetadata.deprecated == deprecated)
descendants = query.fetch(keys_only=keys_only)
return descendants
|
py | 7dfe3e9066cca3a1028baa87da869cc319c16422 | from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from user.models import User
from user.serializers import UserSerializer
import uuid
client = Client()
class SubmitScoreTest(TestCase):
def setUp(self):
self.firat = User.objects.create(display_name='firat')
self.faruk = User.objects.create(display_name='faruk')
def test_submit_score_valid(self):
data = {
'user_id': self.firat.user_id,
'score_worth': 15,
}
response = client.post('/score/submit/', data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_submit_invalid_score(self):
data = {
'user_id': self.faruk.user_id,
'score_worth': 'asd',
}
response = client.post('/score/submit/', data=data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_submit_invalid_user(self):
data = {
'user_id': uuid.uuid4(),
'score_worth': 15,
}
response = client.post('/score/submit/', data=data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
py | 7dfe3f3c3e36eb5477aade8942c65ac078378f67 | # -*- coding: utf8 -*-
# MIT License
# Copyright (c) 2017 Simon Wüllhorst
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import datetime
import re
class GraphiteHandler(object):
def __init__(self, server, port, alternative_now = None):
self.server = server
self.port = port
self.entries = []
self.specialChars = dict.fromkeys(map(ord, ' +.\\/-'), '_')
if alternative_now:
self.utc_stamp_now = datetime.datetime.strptime(alternative_now, '%Y-%m-%d_%H-%M-%S').strftime("%s")
else:
self.utc_stamp_now = datetime.datetime.now().strftime("%s")
@property
def message(self):
return ''.join(self.entries)
def prepareMessage(self, domains, nodes):
self.__nestedWalker__('nodes', domains)
self.__nestedWalker__('node', nodes)
# print(self.message)
# self.send(self.message)
def __nestedWalker__(self, prefix, tree):
if isinstance(tree, dict):
for k, v in tree.items():
if k:
self.__nestedWalker__(''.join((prefix, '.', k.translate(self.specialChars))), v)
else:
# credits to https://wiki.python.org/moin/PythonSpeed/PerformanceTips#String_Concatenation
self.entries.append(''.join((prefix, ' ', str(tree), ' ', self.utc_stamp_now , '\n')))
def filterMessage(self, pattern, fMode = 'normal', fType = 'graphite_filter'):
if fType == 'graphite_filter':
self.__graphiteFilter__(pattern, fMode)
else:
raise Exception('Selected filter type is not implemented, yet.')
def __graphiteFilter__(self, pattern, fMode):
inverse = True if fMode == 'inverse' else False
regex = re.compile(pattern)
filteredEntries = []
for entry in self.entries:
match = regex.search(entry)
if match and not inverse or inverse and not match:
filteredEntries.append(entry)
self.entries = filteredEntries
def printMessage(self):
print(self.message)
def sendMessage(self):
sock = socket.socket()
sock.connect((self.server, int(self.port)))
sock.sendall(self.message.encode())
sock.close()
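# --- Illustrative usage (assumed example values; not part of the original) -----
# Flattens two small nested dicts into Graphite plaintext lines
# ("prefix.path value timestamp"), keeps only the per-node series, and prints
# them instead of sending. Server/port and the metric names are hypothetical.
if __name__ == '__main__':
    handler = GraphiteHandler(server='localhost', port=2003,
                              alternative_now='2017-01-01_00-00-00')
    handler.prepareMessage(
        domains={'city': {'uplink': 3}},
        nodes={'node-a': {'clients': 12, 'loadavg': 0.25}},
    )
    handler.filterMessage(r'^node\.')  # drop the aggregate 'nodes.*' entries
    handler.printMessage()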
|
py | 7dfe4026875b35ba5167ae8d0dfb8fefcc177874 | import math
from qtpy.QtCore import (QPointF, QRectF, Property)
from qtpy.QtGui import (QColor, QBrush, QPainterPath, QPolygonF, QTransform)
from .base import BaseSymbolIcon
class ScrollPumpSymbolIcon(BaseSymbolIcon):
"""
A widget with a scroll pump symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def __init__(self, parent=None, **kwargs):
super(ScrollPumpSymbolIcon, self).__init__(parent, **kwargs)
self._center_brush = QBrush(QColor("transparent"))
@Property(QBrush)
def centerBrush(self):
return self._center_brush
@centerBrush.setter
def centerBrush(self, new_brush):
if new_brush != self._center_brush:
self._center_brush = new_brush
self.update()
def draw_icon(self, painter):
painter.drawEllipse(QPointF(0.5, 0.5), 0.5, 0.5)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 45 * 16, -120 * 16)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 135 * 16, 120 * 16)
circle_arrow_point = QPointF(0.3, 0.5)
brush = painter.brush()
pen = painter.pen()
painter.setBrush(self.centerBrush)
painter.setPen(QColor("transparent"))
painter.drawEllipse(QPointF(0.5, 0.5), 0.2, 0.2)
painter.setBrush(brush)
painter.setPen(pen)
painter.drawArc(QRectF(0.3, 0.3, 0.4, 0.4), 90 * 16, -270 * 16)
arrow = QPolygonF(
[QPointF(-0.025, 0.0), QPointF(0.025, 0.0), QPointF(0.0, -0.025)])
painter.setBrush(QBrush(QColor(0, 0, 0)))
painter.drawPolygon(arrow.translated(circle_arrow_point))
class IonPumpSymbolIcon(BaseSymbolIcon):
"""
A widget with an ion pump symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def draw_icon(self, painter):
painter.drawEllipse(QPointF(0.5, 0.5), 0.5, 0.5)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 45 * 16, -120 * 16)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 135 * 16, 120 * 16)
bottom_arrow_point = QPointF(0.5, 0.8)
painter.drawLine(bottom_arrow_point, QPointF(0.5, 0.7))
curve_start = QPointF(0.5, 0.7)
bend_angle = 25
curve_end_l = QPointF(
0.4 * math.cos(math.radians(90 + bend_angle)) + 0.5,
-0.4 * math.sin(math.radians(90 + bend_angle)) + 0.5)
c1 = QPointF(0.5, 0.4)
path = QPainterPath(curve_start)
path.quadTo(c1, curve_end_l)
painter.drawPath(path)
curve_end_r = QPointF(
0.4 * math.cos(math.radians(90 - bend_angle)) + 0.5,
-0.4 * math.sin(math.radians(90 - bend_angle)) + 0.5)
path = QPainterPath(curve_start)
path.quadTo(c1, curve_end_r)
painter.drawPath(path)
# Draw the arrow end-caps
painter.setBrush(QBrush(QColor(0, 0, 0)))
arrow = QPolygonF(
[QPointF(-0.025, 0.0), QPointF(0.025, 0.0), QPointF(0.0, 0.025)])
painter.drawPolygon(arrow.translated(bottom_arrow_point))
t = QTransform()
t.rotate(180.0 - 25.0)
arrow_l = t.map(arrow)
arrow_l = arrow_l.translated(curve_end_l)
painter.drawPolygon(arrow_l)
t = QTransform()
t.rotate(180.0 + 25.0)
arrow_r = t.map(arrow)
arrow_r = arrow_r.translated(curve_end_r)
painter.drawPolygon(arrow_r)
class TurboPumpSymbolIcon(BaseSymbolIcon):
"""
A widget with a turbo pump symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def __init__(self, parent=None, **kwargs):
super(TurboPumpSymbolIcon, self).__init__(parent, **kwargs)
self._center_brush = QBrush(QColor("transparent"))
@Property(QBrush)
def centerBrush(self):
return self._center_brush
@centerBrush.setter
def centerBrush(self, new_brush):
if new_brush != self._center_brush:
self._center_brush = new_brush
self.update()
def draw_icon(self, painter):
# Outer circle
painter.drawEllipse(QPointF(0.5, 0.5), 0.5, 0.5)
brush = painter.brush()
pen = painter.pen()
painter.setBrush(self.centerBrush)
# Inner concentric circles
painter.drawEllipse(QPointF(0.5, 0.5), 0.2, 0.2)
painter.drawEllipse(QPointF(0.5, 0.5), 0.1, 0.1)
painter.setBrush(brush)
painter.setPen(pen)
# Inner straight lines
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 45 * 16, -120 * 16)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 135 * 16, 120 * 16)
class GetterPumpSymbolIcon(BaseSymbolIcon):
"""
A widget with a getter pump symbol drawn in it.
Parameters
----------
parent : QWidget
The parent widget for the icon
"""
def draw_icon(self, painter):
painter.drawEllipse(QPointF(0.5, 0.5), 0.5, 0.5)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 90 * 16, -100 * 16)
painter.drawChord(QRectF(0.0, 0.0, 1.0, 1.0), 135 * 16, 100 * 16)
# Draw the arrow end-caps
painter.setBrush(QBrush(QColor(0, 0, 0)))
top_arrow_point = QPointF(0.35, 0.15)
arrow = QPolygonF(
[QPointF(-0.08, 0.0),
QPointF(-0.005, 0.0),
QPointF(-0.005, 0.15),
QPointF(0.005, 0.15),
QPointF(0.005, 0.0),
QPointF(0.08, 0.0),
QPointF(0.00, -0.08)]
)
t = QTransform()
t.rotate(-25)
top_arrow_r = t.map(arrow)
arrow_l = top_arrow_r.translated(top_arrow_point)
painter.drawPolygon(arrow_l)
bottom_left_arrow_point = QPointF(0.35, 0.89)
t = QTransform()
t.rotate(180.0 + 25.0)
arrow_r = t.map(arrow)
arrow_r = arrow_r.translated(bottom_left_arrow_point)
painter.drawPolygon(arrow_r)
bottom_right_arrow_point = QPointF(0.85, 0.65)
t = QTransform()
t.rotate(180.0 - 65.0)
arrow_r = t.map(arrow)
arrow_r = arrow_r.translated(bottom_right_arrow_point)
painter.drawPolygon(arrow_r)
|
py | 7dfe4027c1aa15db7f22c398e179b331c975426f | class Mastermind:
def __init__(self):
self.resetState()
def registerResponse(self,guess,response):
if response == "2222":
# Win
return
eliminated = self.computeEliminatedList(guess,response)
self.possibleSolutions -= eliminated
def nextGuess(self):
possibleResponses = set()
for i in range(0,3):
for j in range(0,3):
for k in range(0,3):
for l in range(0,3):
code = "{}{}{}{}".format(i,j,k,l)
possibleResponses |= {code}
bestGuess = ""
bestEliminations = -1
for guess in self.possibleSolutions:
minEliminations = 9999
for response in possibleResponses:
eliminations = len(self.computeEliminatedList(guess,response))
if eliminations < minEliminations:
minEliminations = eliminations
if minEliminations > bestEliminations:
bestEliminations = minEliminations
bestGuess = guess
return bestGuess
def computeEliminatedList(self,guess,response):
count_2 = response.count("2")
count_1 = response.count("1")
eliminated = set()
for code in self.possibleSolutions:
sim_response = self.computeResponse(guess,code)
sim_count_2 = sim_response.count("2")
sim_count_1 = sim_response.count("1")
if sim_count_1 != count_1 or sim_count_2 != count_2:
eliminated |= {code}
return eliminated
def computeResponse(self,guess,code):
code_finite = list(code)
response = ["?"]*4
for i in range(0,4):
if guess[i] == code[i]:
response[i] = "2"
code_finite[i] = " "
for i in range(0,4):
if guess[i] != code[i]:
if guess[i] in code_finite:
response[i] = "1"
code_finite[ code_finite.index(guess[i]) ] = " "
else:
response[i] = "0"
return response
def resetState(self):
self.possibleSolutions = set()
for i in range(1,7):
for j in range(1,7):
for k in range(1,7):
for l in range(1,7):
code = "{}{}{}{}".format(i,j,k,l)
self.possibleSolutions |= {code}
|
py | 7dfe406f25208b274278316c7656e720e96f8ca2 | """
TODO: Not complete.
"""
from __future__ import annotations
from EasyNN.loss.abc import Loss
from EasyNN.typing import Array1D
class MeanSquareError(Loss):
"""
Computes the loss as the mean squared error between the predicted
output and the true output.
"""
def __call__(self: MeanSquareError, x: Array1D, y: Array1D) -> float:
y_pred = self.model(x)
y_pred -= y
y_pred **= 2
return y_pred.mean() / 2
def dy(self: MeanSquareError, y: Array1D, y_pred: Array1D) -> Array1D:
y_pred -= y
y_pred /= y.size
return y_pred
|
py | 7dfe40b9a1a00c2ee8dee5d42c699f635afa28d3 | """Function and settings for creating plots"""
import sys
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
def set_default_appearance():
plt.rcParams['lines.linewidth'] = 1.0
plt.rcParams['lines.markeredgewidth'] = 1.0
plt.rcParams['lines.markersize'] = 2.5
# Fonts and symbols
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times New Roman'
plt.rcParams['font.weight'] = 'normal'
plt.rcParams['font.size'] = '8'
plt.rcParams['text.usetex'] = False
plt.rcParams['mathtext.rm'] = 'serif'
plt.rcParams['mathtext.it'] = 'serif:italic'
plt.rcParams['mathtext.fontset'] = 'stix'
# Axes
plt.rcParams['axes.edgecolor'] = (0.0, 0.0, 0.0)
plt.rcParams['axes.linewidth'] = 0.8
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
# Ticks
plt.rcParams['xtick.color'] = (0.0, 0.0, 0.0)
plt.rcParams['xtick.major.width'] = 0.8
plt.rcParams['ytick.color'] = (0.0, 0.0, 0.0)
plt.rcParams['ytick.major.width'] = 0.8
# Errorbar plots
plt.rcParams['errorbar.capsize'] = 2
def set_thesis_appearance():
plt.rcParams['lines.linewidth'] = 1.0
plt.rcParams['lines.markeredgewidth'] = 1.0
plt.rcParams['lines.markersize'] = 1.0
# Fonts and symbols
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = [
r'\usepackage{newpxtext}',
r'\usepackage{siunitx}',
r'\DeclareSIUnit\Molar{M}',
r'\DeclareSIUnit\kb{\ensuremath{k_\textrm{B}}}',
r'\usepackage{newpxmath}']
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.size'] = '9'
# Axes
plt.rcParams['axes.edgecolor'] = (0.0, 0.0, 0.0)
plt.rcParams['axes.linewidth'] = 0.8
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
# Ticks
plt.rcParams['xtick.color'] = (0.0, 0.0, 0.0)
plt.rcParams['xtick.major.width'] = 0.8
plt.rcParams['ytick.color'] = (0.0, 0.0, 0.0)
plt.rcParams['ytick.major.width'] = 0.8
# Errorbar plots
plt.rcParams['errorbar.capsize'] = 2
def read_expectations(filebase):
aves_filename = '{}.aves'.format(filebase)
aves = pd.read_csv(aves_filename, sep=' ')
stds_filename = '{}.stds'.format(filebase)
stds = pd.read_csv(stds_filename, sep=' ')
return aves, stds
def cm_to_inches(cm):
return cm/2.54
|
py | 7dfe41d60eb45556006b68a04d17b427a3a062b3 | # -*- coding: utf-8 -*-
#
# Part 1 action script
#
from audio_tools import *
import os
import re
import time
mapdata_path = "mapdata/"
try:
divisor = GLOBAL["divisor"]
except:
divisor = 4
def step1_load_maps(path):
# Test paths and node
test_process_path("node")
if not os.path.isdir(mapdata_path):
os.mkdir(mapdata_path)
# Open maplist
with open(path, encoding="utf8") as fp:
fcont = fp.readlines()
# Reset results
results = []
for line in fcont:
results.append(line)
# Remove maps
for file in os.listdir(mapdata_path):
if file.endswith(".npz"):
os.remove(os.path.join(mapdata_path, file))
print("Number of filtered maps: {}".format(len(results)))
for k, mname in enumerate(results):
try:
start = time.time()
read_and_save_osu_file(mname.strip(), filename=os.path.join(
mapdata_path, str(k)), divisor=divisor)
end = time.time()
print("Map data #" + str(k) + " saved! time = " +
str(end - start) + " secs")
except Exception as e:
print("Error on #{}, path = {}, error = {}".format(
str(k), mname.strip(), e))
|
py | 7dfe438ad34409eb7c851aea93a08a637939d580 | import urlfetch
import json
# Fetch per-province statistics
url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5'
res = urlfetch.fetch(url)
resstr = res.content.decode('utf-8')
# Parse the JSON response
jsonRes = json.loads(resstr)
data = jsonRes['data']
data = json.loads(data)['areaTree']
data = data[0]['children']
# Build the ECharts data
outall = ''
for single in data:
#{ name: '湖北', value:4586 },
outstr = '\t\t{ name: \'' + single['name'] + '\', value: '+str(single['total']['confirm'])+' },\n'
outall = outall +outstr
# Read the chart template HTML
fid = open('MapPiecewise20200131temp.html','rb')
oriHtml = fid.read().decode('utf-8')
fid.close()
# Write out the HTML with the data inserted
oriHtml = oriHtml.replace('//insertData//',outall)
fid = open('MapPiecewise20200131Modified.html','wb')
fid.write(oriHtml.encode('utf-8'))
fid.close()
|
py | 7dfe44ac4d575ce8b981317d207b763c2f6d7253 | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
"""Function implementation"""
import logging
import time
import json
from datetime import datetime
from threading import current_thread
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import RequestsCommon
from fn_mcafee_esm.util.helper import check_config, get_authenticated_headers, check_status_code
log = logging.getLogger(__name__)
def case_get_case_detail(rc, options, headers, id):
url = options["esm_url"] + "/rs/esm/v2/caseGetCaseDetail"
payload = {
"id": id
}
r = rc.execute_call_v2('post', url, headers=headers, data=json.dumps(payload), verify=options["trust_cert"],
proxies=rc.get_proxies())
log.debug(r.content)
if r.status_code == 400:
raise FunctionError(r.content)
check_status_code(r.status_code)
return r.json()
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'mcafee_esm_get_case_detail"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.opts = opts
self.options = opts.get("fn_mcafee_esm", {})
# Check config file and change trust_cert to Boolean
self.options = check_config(self.options)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.opts = opts
self.options = opts.get("fn_mcafee_esm", {})
@function("mcafee_esm_get_case_detail")
def _mcafee_esm_get_case_detail_function(self, event, *args, **kwargs):
"""Function: Calls the caseGetCaseDetail endpoint and returns all the details of a case."""
try:
start_time = time.time()
yield StatusMessage("starting...")
options = self.options
# Instantiate RequestsCommon object
rc = RequestsCommon(opts=self.opts, function_opts=self.options)
authenticated_headers = get_authenticated_headers(rc, options["esm_url"], options["esm_username"],
options["esm_password"], options["trust_cert"])
# Get the function parameters:
mcafee_esm_case_id = kwargs.get("mcafee_esm_case_id") # number
log = logging.getLogger(__name__)
if not mcafee_esm_case_id:
raise ValueError("mcafee_esm_case_id is required")
log.info("mcafee_esm_case_id: %s", mcafee_esm_case_id)
# Get case details
details = case_get_case_detail(rc, options, authenticated_headers, mcafee_esm_case_id)
end_time = time.time()
results = {
"inputs": {
"mcafee_esm_case_id": mcafee_esm_case_id
},
"metrics": {
"execution_time": str(end_time - start_time),
"function": "mcafee_esm_get_case_detail",
"thread": current_thread().name,
"timestamp": datetime.fromtimestamp(end_time).strftime("%Y-%m-%d %H:%M:%S")
},
"details": details
}
yield StatusMessage("done...")
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception as e:
yield FunctionError(e)
|
py | 7dfe44c39ad9e40831081b8c5be388436e7510c5 | # -*- coding: utf-8 -*-
#
# talks.ox documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 8 11:43:21 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinxcontrib.httpdomain',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {'widget': ('http://talksox.readthedocs.org/projects/talksox-js-widget/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
rst_epilog = """
.. |upcoming| replace:: New Feature!
.. |changes| replace:: There have been changes to this part of Oxford Talks in the latest release (May 2016)
"""
# General information about the project.
project = u'talks.ox'
copyright = u'2015, Mobile Oxford Team, Software Solutions, IT Services, University of Oxford'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'talksoxdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'talksox.tex', u'talks.ox Documentation',
u'Mobile Oxford Team, Software Solutions, IT Services, University of Oxford', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'talksox', u'talks.ox Documentation',
[u'Mobile Oxford Team, Software Solutions, IT Services, University of Oxford'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'talksox', u'talks.ox Documentation',
u'Mobile Oxford Team, Software Solutions, IT Services, University of Oxford', 'talksox', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
py | 7dfe44e1afbdf979cb72a6c8432d45368fa99c7d | __author__ = 'saeedamen'
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
TradeAnalysis
Applies some basic trade analysis to a trading strategy (as defined by a TradingModel). Uses PyFolio to create some
basic trading statistics. Also allows you to test multiple parameters for a specific strategy (like TC).
"""
pf = None
try:
import pyfolio as pf
except ImportError: pass
import datetime
import matplotlib
import matplotlib.pyplot as plt
import pandas
from chartpy import Chart, Style, ChartConstants
from findatapy.timeseries import Calculations, Timezone
from findatapy.util.loggermanager import LoggerManager
from finmarketpy.backtest import Backtest
class TradeAnalysis(object):
def __init__(self, engine = ChartConstants().chartfactory_default_engine):
self.logger = LoggerManager().getLogger(__name__)
self.DUMP_PATH = 'output_data/' + datetime.date.today().strftime("%Y%m%d") + ' '
self.SCALE_FACTOR = 3
self.DEFAULT_PLOT_ENGINE = engine
self.chart = Chart(engine=self.DEFAULT_PLOT_ENGINE)
return
def run_strategy_returns_stats(self, trading_model):
"""
run_strategy_returns_stats - Plots useful statistics for the trading strategy (using PyFolio)
Parameters
----------
trading_model : TradingModel
defining trading strategy
"""
pnl = trading_model.get_strategy_pnl()
tz = Timezone()
calculations = Calculations()
# PyFolio assumes UTC time based DataFrames (so force this localisation)
try:
pnl = tz.localise_index_as_UTC(pnl)
except: pass
# set the matplotlib style sheet & defaults
# at present this only works in Matplotlib engine
try:
matplotlib.rcdefaults()
plt.style.use(ChartConstants().chartfactory_style_sheet['chartpy-pyfolio'])
except: pass
# TODO for intraday strategies, make daily
# convert DataFrame (assumed to have only one column) to Series
pnl = calculations.calculate_returns(pnl)
pnl = pnl.dropna()
pnl = pnl[pnl.columns[0]]
fig = pf.create_returns_tear_sheet(pnl, return_fig=True)
try:
            plt.savefig(trading_model.DUMP_PATH + "stats.png")
except: pass
plt.show()
def run_tc_shock(self, strategy, tc = None):
if tc is None: tc = [0, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2.0]
parameter_list = [{'spot_tc_bp' : x } for x in tc]
pretty_portfolio_names = [str(x) + 'bp' for x in tc] # names of the portfolio
parameter_type = 'TC analysis' # broad type of parameter name
return self.run_arbitrary_sensitivity(strategy,
parameter_list=parameter_list,
pretty_portfolio_names=pretty_portfolio_names,
parameter_type=parameter_type)
###### Parameters and signal generations (need to be customised for every model)
def run_arbitrary_sensitivity(self, trading_model, parameter_list = None, parameter_names = None,
pretty_portfolio_names = None, parameter_type = None):
asset_df, spot_df, spot_df2, basket_dict = trading_model.fill_assets()
port_list = None
ret_stats_list = []
for i in range(0, len(parameter_list)):
br = trading_model.fill_backtest_request()
current_parameter = parameter_list[i]
# for calculating P&L
for k in current_parameter.keys():
setattr(br, k, current_parameter[k])
trading_model.br = br # for calculating signals
signal_df = trading_model.construct_signal(spot_df, spot_df2, br.tech_params, br)
backtest = Backtest()
self.logger.info("Calculating... " + str(pretty_portfolio_names[i]))
backtest.calculate_trading_PnL(br, asset_df, signal_df)
ret_stats_list.append(backtest.get_portfolio_pnl_ret_stats())
stats = str(backtest.get_portfolio_pnl_desc()[0])
port = backtest.get_cumportfolio().resample('B').mean()
port.columns = [str(pretty_portfolio_names[i]) + ' ' + stats]
if port_list is None:
port_list = port
else:
port_list = port_list.join(port)
# reset the parameters of the strategy
trading_model.br = trading_model.fill_backtest_request()
style = Style()
ir = [t.inforatio()[0] for t in ret_stats_list]
# if we have too many combinations remove legend and use scaled shaded colour
# if len(port_list) > 10:
# style.color = 'Blues'
# style.display_legend = False
# plot all the variations
style.resample = 'B'
style.file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' ' + parameter_type + '.png'
style.html_file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' ' + parameter_type + '.html'
style.scale_factor = self.SCALE_FACTOR
style.title = trading_model.FINAL_STRATEGY + ' ' + parameter_type
self.chart.plot(port_list, chart_type='line', style=style)
# plot all the IR in a bar chart form (can be easier to read!)
style = Style()
style.file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' ' + parameter_type + ' IR.png'
style.html_file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' ' + parameter_type + ' IR.html'
style.scale_factor = self.SCALE_FACTOR
style.title = trading_model.FINAL_STRATEGY + ' ' + parameter_type
summary = pandas.DataFrame(index = pretty_portfolio_names, data = ir, columns = ['IR'])
self.chart.plot(summary, chart_type='bar', style=style)
return port_list
###### Parameters and signal generations (need to be customised for every model)
    ###### Plot all the output separately
def run_arbitrary_sensitivity_separately(self, trading_model, parameter_list = None,
pretty_portfolio_names = None, strip = None):
# asset_df, spot_df, spot_df2, basket_dict = strat.fill_assets()
final_strategy = trading_model.FINAL_STRATEGY
for i in range(0, len(parameter_list)):
br = trading_model.fill_backtest_request()
current_parameter = parameter_list[i]
# for calculating P&L
for k in current_parameter.keys():
setattr(br, k, current_parameter[k])
trading_model.FINAL_STRATEGY = final_strategy + " " + pretty_portfolio_names[i]
self.logger.info("Calculating... " + pretty_portfolio_names[i])
trading_model.br = br
trading_model.construct_strategy(br = br)
trading_model.plot_strategy_pnl()
trading_model.plot_strategy_leverage()
trading_model.plot_strategy_group_benchmark_pnl(strip = strip)
# reset the parameters of the strategy
trading_model.br = trading_model.fill_backtest_request()
trading_model.FINAL_STRATEGY = final_strategy
def run_day_of_month_analysis(self, trading_model):
from finmarketpy.economics.seasonality import Seasonality
calculations = Calculations()
seas = Seasonality()
trading_model.construct_strategy()
pnl = trading_model.get_strategy_pnl()
# get seasonality by day of the month
pnl = pnl.resample('B').mean()
rets = calculations.calculate_returns(pnl)
bus_day = seas.bus_day_of_month_seasonality(rets, add_average = True)
# get seasonality by month
pnl = pnl.resample('BM').mean()
rets = calculations.calculate_returns(pnl)
month = seas.monthly_seasonality(rets)
self.logger.info("About to plot seasonality...")
style = Style()
# Plotting spot over day of month/month of year
style.color = 'Blues'
style.scale_factor = self.SCALE_FACTOR
style.file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' seasonality day of month.png'
style.html_file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' seasonality day of month.html'
style.title = trading_model.FINAL_STRATEGY + ' day of month seasonality'
style.display_legend = False
style.color_2_series = [bus_day.columns[-1]]
style.color_2 = ['red'] # red, pink
style.linewidth_2 = 4
style.linewidth_2_series = [bus_day.columns[-1]]
style.y_axis_2_series = [bus_day.columns[-1]]
self.chart.plot(bus_day, chart_type='line', style=style)
style = Style()
style.scale_factor = self.SCALE_FACTOR
style.file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' seasonality month of year.png'
style.html_file_output = self.DUMP_PATH + trading_model.FINAL_STRATEGY + ' seasonality month of year.html'
style.title = trading_model.FINAL_STRATEGY + ' month of year seasonality'
self.chart.plot(month, chart_type='line', style=style)
return month
|
py | 7dfe45820236f1804aeec2a8774835ca59d63c24 | #
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Weave TLV Schema errors.
#
import os
class WeaveTLVSchemaError(Exception):
def __init__(self, msg, detail=None, sourceRef=None):
super(WeaveTLVSchemaError, self).__init__(msg)
self.detail = detail
self.sourceRef = sourceRef
def format(self, withTextMarker=True, withDetail=True):
res = 'ERROR: ' + str(self)
if withDetail and self.detail is not None:
res = res + "\nNOTE: " + self.detail
if self.sourceRef:
res = '%s: %s' % (self.sourceRef.filePosStr(), res)
if withTextMarker:
res += '\n\n' + self.sourceRef.lineSummaryStr()
return res
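# A minimal usage sketch (the message and detail strings are illustrative assumptions):
#
#   try:
#       raise WeaveTLVSchemaError('unexpected type', detail='expected STRUCTURE')
#   except WeaveTLVSchemaError as err:
#       print(err.format())   # "ERROR: unexpected type" followed by the NOTE line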
class AmbiguousTagError(Exception):
pass
|
py | 7dfe45dcc06a275154175f683181ab5c967c8ce4 | import webbrowser
import validators
import sys
from colorama import Fore, Style
from colorama import init, AnsiToWin32
init(wrap=False)
stream = AnsiToWin32(sys.stderr).stream
init()
y = ["yes", "y", "Y", "YES"]
n = ["no", "n", "N", "NO"]
def main():
global name
web = input(f"[{Fore.RED}Website{Style.RESET_ALL}] ")
url = validators.url(web)
    if url:
print(f"[{Fore.GREEN}Found{Style.RESET_ALL}] {Fore.BLUE}{web}{Style.RESET_ALL}")
yn = input(f"[{Fore.CYAN}Open{Style.RESET_ALL}] ")
if yn in y:
webbrowser.open(web)
main()
elif yn in n:
main()
else:
main()
main()
else:
print(f"[{Fore.LIGHTBLUE_EX}x{Style.RESET_ALL}] Not found any website called '{web}'")
main()
main() |
py | 7dfe461614827cd87446cdb5c2d1e6d48dac6973 | import argparse
import os
import time
import json
from typing import List
from dataclasses import dataclass
import scapy
import subprocess
from rx.scheduler import NewThreadScheduler
from src.device_sniffer import DeviceSniffer
from src.attendance_upload_service import AttendanceUploadService
from src.state_context_manager import AttendanceStateContextManager
from src.config import Config
from src.user_querier import UserQuerier
from src.file_queue import FileQueue
from src.connection_service import ConnectionService, SimpleConnectionService
@dataclass
class CommandLineArgs:
interface: str
queue_path: str
enable_monitor_wakeup: bool
configs: List[Config]
enable_regular_queue_check: bool
regular_queue_check_interval: int
def send_queued_messages_if_connected_to_internet(upload_service: AttendanceUploadService, connection_service: ConnectionService) -> None:
print("Checking unsent messages...")
if not connection_service.is_connected_to_internet():
print("No internet connection")
return
print("Sending unsent messages...")
upload_service.process_queued_messages()
print("Sent unsent messages")
def cmdline() -> CommandLineArgs:
parser = argparse.ArgumentParser()
parser.add_argument('--interface', '-i', default='mon0', help='monitor mode enabled interface')
parser.add_argument('--config', '-c', required=True, help='path to JSON config file')
parser.add_argument('--queue-path', '-q', required=True, help='path for saving queue')
parser.add_argument('--enable-monitor-on-wakeup', '-w', action='store_true', help='Enable monitor mode on specified interface on startup')
parser.add_argument('--enable-regular-queue-check', '-r', action='store_true', help='Regularly check queue and send messages if any')
parser.add_argument('--regular-queue-check-interval', type=int, default=600, help='Interval for checking queue (seconds)')
args = parser.parse_args()
configs: List[Config] = []
with open(args.config, "r") as f:
raw_configs = json.load(f)
for raw_config in raw_configs:
configs.append(Config(raw_config["userid"], raw_config["ssid"], raw_config["mac_address"], raw_config["absence_due_second"]))
return CommandLineArgs(
interface=args.interface,
queue_path=args.queue_path,
enable_monitor_wakeup=args.enable_monitor_on_wakeup,
configs=configs,
enable_regular_queue_check=args.enable_regular_queue_check,
regular_queue_check_interval=args.regular_queue_check_interval
)
def enable_monitor_mode(interface: str) -> None:
cmd_result = subprocess.run(["ip", "link", "set", interface, "down"])
if cmd_result.returncode == -1:
raise RuntimeError()
cmd_result = subprocess.run(["iw", interface, "set", "monitor", "none"])
if cmd_result.returncode == -1:
raise RuntimeError()
cmd_result = subprocess.run(["ip", "link", "set", interface, "up"])
if cmd_result.returncode == -1:
raise RuntimeError()
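# A minimal invocation sketch (interface name, file paths, script name and the connection
# string are illustrative assumptions, not values taken from this repository):
#
#   IOTHUB_DEVICE_CONNECTION_STRING="HostName=..." \
#   python3 main.py -i mon0 -c config.json -q queue.json -w -r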
if __name__ == "__main__":
args = cmdline()
if args.enable_monitor_wakeup:
try:
enable_monitor_mode(args.interface)
except:
raise RuntimeError("Failed while enabling monitor mode")
print(f"Listening on {args.interface}")
queue = FileQueue(args.queue_path)
connection_string = os.environ.get("IOTHUB_DEVICE_CONNECTION_STRING")
    assert connection_string, "IoTHub connection string should not be empty"
upload_service = AttendanceUploadService.create(connection_string, queue, is_dry_run=False)
user_querier = UserQuerier(args.configs)
device_sniffer = DeviceSniffer(user_querier, args.interface)
state_context_manager = AttendanceStateContextManager(args.configs, device_sniffer.get_observable(), upload_service)
    regular_check_task = None
    if args.enable_regular_queue_check:
regular_check_task = NewThreadScheduler().schedule_periodic(
args.regular_queue_check_interval,
lambda x: send_queued_messages_if_connected_to_internet(upload_service, SimpleConnectionService())
)
try:
device_sniffer.start()
while True:
time.sleep(1)
except KeyboardInterrupt:
device_sniffer.stop()
        if regular_check_task is not None:
            regular_check_task.dispose()
print("Exiting program...")
|
py | 7dfe4738a1f545a150b87e893eee414b21b4b042 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'StorageAccountPropertiesArgs',
]
@pulumi.input_type
class StorageAccountPropertiesArgs:
def __init__(__self__, *,
access_key: pulumi.Input[str],
storage_account_id: pulumi.Input[str]):
"""
The properties of a storage account for a machine learning team account.
:param pulumi.Input[str] access_key: The access key to the storage account.
:param pulumi.Input[str] storage_account_id: The fully qualified arm Id of the storage account.
"""
pulumi.set(__self__, "access_key", access_key)
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter(name="accessKey")
def access_key(self) -> pulumi.Input[str]:
"""
The access key to the storage account.
"""
return pulumi.get(self, "access_key")
@access_key.setter
def access_key(self, value: pulumi.Input[str]):
pulumi.set(self, "access_key", value)
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> pulumi.Input[str]:
"""
The fully qualified arm Id of the storage account.
"""
return pulumi.get(self, "storage_account_id")
@storage_account_id.setter
def storage_account_id(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_account_id", value)
|
py | 7dfe47a35c567f3425c488956688b85ead0bfadc | import torch
import torch.nn as nn
from .helpers import replace_denormals
class NegativeSNRLoss(nn.Module):
"""
Negative Signal-to-Noise Ratio loss.
Calculates the negative SNR over a predicted output and ground truth
output pair.
Args:
eps (float): Machine epsilon.
"""
def __init__(self,
eps: float = torch.finfo(torch.float32).eps):
super().__init__()
self.eps = eps
def forward(self, y_pred: torch.tensor, y: torch.tensor):
""" Calculates the negative SNR loss based on a predicted output and a
ground truth output.
Args:
y_pred (torch.tensor): Predicted tensor containing the denoised
signal.
y (torch.tensor): Ground truth tensor containing the clean signal.
Returns:
loss (torch.tensor): 1D tensor containing the loss function value
"""
numerator = torch.sum(torch.square(y), dim=-1, keepdim=True)
denominator = torch.sum(torch.square(y - y_pred), dim=-1, keepdim=True)
loss = -10 * torch.log10(numerator / denominator + self.eps)
# experimental result based on 7 significant digits for torch.float32
loss[torch.isneginf(loss)] = -140.0
return torch.mean(loss, dim=0)
class GainMaskBasedNegativeSNRLoss(nn.Module):
""" Negative Signal-to-Noise Ratio loss for gain mask based networks.
Calculates the negative SNR over a predicted spectral mask and a complex
stft output of a noisy speech signal and ground truth clean signal.
"""
def __init__(self,
window_size: int = 512,
hop_size: int = 128,
eps: float = 1e-10):
super().__init__()
self.window_size = window_size
self.hop_size = hop_size
self.eps = eps
self._window = torch.hann_window(self.window_size)
self._negative_snr_loss = NegativeSNRLoss(eps=self.eps)
def istft(self, x_complex: torch.tensor):
window = self._window.to(x_complex.device)
istft = torch.istft(x_complex,
onesided=True,
center=True,
n_fft=self.window_size,
hop_length=self.hop_size,
normalized=False,
window=window)
return istft
def forward(self, y_pred_mask: torch.tensor, x_complex: torch.tensor,
y_complex: torch.tensor):
"""
Calculates the negative SNR over a predicted spectral mask and a complex
stft output of a noisy speech signal and ground truth clean signal.
Args:
y_pred_mask (torch.tensor): Predicted tensor containing the gain mask
to be applied to the complex stft input x_complex.
x_complex (torch.tensor): Tensor containing the complex stft of
the input signal.
y_complex (torch.tensor): Tensor containing the ground truth complex
stft of the output signal.
Returns:
loss (torch.tensor): 1D tensor containing the loss function value
"""
y_pred_complex = y_pred_mask.squeeze(1).permute(0, 2, 1) * x_complex
y_pred = self.istft(y_pred_complex)
y = self.istft(y_complex)
return self._negative_snr_loss(y_pred, y)
class ComplexCompressedMSELoss(nn.Module):
""" Complex Compressed Mean Square Error Loss implemented as shown in
section two of:
https://arxiv.org/pdf/2101.09249.pdf
c_ (float): Compression factor.
lambda_ (float): Weighting factor.
eps (float): Machine epsilon.
"""
def __init__(self,
c_: float = 0.3,
lambda_: float = 0.3,
eps: float = 1e-10):
super().__init__()
self.c_ = c_
self.lambda_ = lambda_
self.eps = eps
def forward(self, y_pred_mask: torch.tensor, x_complex: torch.tensor,
y_complex: torch.tensor):
# clean denormals
y_complex = replace_denormals(torch.real(y_complex)) + \
1j * torch.imag(y_complex)
# get target magnitude and phase
y_mag = torch.abs(y_complex)
y_phase = torch.angle(y_complex)
# predicted complex stft
y_pred_mask = y_pred_mask.squeeze(1).permute(0, 2, 1)
y_pred_complex = y_pred_mask.type(torch.complex64) * x_complex
# clean denormals
y_pred_complex = replace_denormals(torch.real(y_pred_complex)) + \
1j * torch.imag(y_pred_complex)
        # get predicted magnitude and phase
y_pred_mag = torch.abs(y_pred_complex)
y_pred_phase = torch.angle(y_pred_complex)
# target complex exponential
y_complex_exp = (y_mag ** self.c_).type(torch.complex64) * \
torch.exp(1j * y_phase.type(torch.complex64))
# predicted complex exponential
y_pred_complex_exp = (y_pred_mag ** self.c_).type(torch.complex64) * \
torch.exp(1j * y_pred_phase.type(torch.complex64))
# magnitude only loss component
mag_loss = torch.abs(y_mag ** self.c_ - y_pred_mag ** self.c_) ** 2
mag_loss = torch.sum(mag_loss, dim=[1, 2])
# complex loss component
complex_loss = torch.abs(y_complex_exp - y_pred_complex_exp) ** 2
complex_loss = torch.sum(complex_loss, dim=[1, 2])
# blend both loss components
loss = (1 - self.lambda_) * mag_loss + (self.lambda_) * complex_loss
# returns the mean blended loss of the batch
return torch.mean(loss)
|
py | 7dfe487a1044a2c75f8dc5e980c977bb5cd0134f | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.service_type import ServiceTypeFragment, QUERY as ServiceTypeFragmentQuery
from ..input.service_type_create_data import ServiceTypeCreateData
QUERY: List[str] = ServiceTypeFragmentQuery + ["""
mutation AddServiceTypeMutation($data: ServiceTypeCreateData!) {
addServiceType(data: $data) {
...ServiceTypeFragment
}
}
"""]
@dataclass
class AddServiceTypeMutation(DataClassJsonMixin):
@dataclass
class AddServiceTypeMutationData(DataClassJsonMixin):
@dataclass
class ServiceType(ServiceTypeFragment):
pass
addServiceType: ServiceType
data: AddServiceTypeMutationData
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient, data: ServiceTypeCreateData) -> AddServiceTypeMutationData.ServiceType:
# fmt: off
variables = {"data": data}
try:
network_start = perf_counter()
response_text = client.call(''.join(set(QUERY)), variables=variables)
decode_start = perf_counter()
res = cls.from_json(response_text).data
decode_time = perf_counter() - decode_start
network_time = decode_start - network_start
client.reporter.log_successful_operation("AddServiceTypeMutation", variables, network_time, decode_time)
return res.addServiceType
except OperationException as e:
raise FailedOperationException(
client.reporter,
e.err_msg,
e.err_id,
"AddServiceTypeMutation",
variables,
)
|
py | 7dfe49355490c5cec49fd74a8e545bcaf78cb07c | #!/usr/bin/env python3
import json
import sqlite3
import sys
bus_stops_map = {}
conn = sqlite3.connect('datamall.sqlite')
conn.row_factory = sqlite3.Row
c = conn.cursor()
for row in c.execute('''select bus_stop_code code, road_name, description, latitude, longitude from bus_stops'''):
stop = dict(row)
bus_stops_map[stop['code']] = stop
conn.row_factory = None
c = conn.cursor()
for key in bus_stops_map.keys():
c.execute('''select distinct service_no
from bus_routes
where bus_stop_code = ?''', (key,))
rows = c.fetchall()
services = [r[0] for r in rows]
bus_stops_map[key]['services'] = services
bus_stops = sorted(bus_stops_map.values(), key=lambda s: s['code'])
json.dump(bus_stops, sys.stdout)
|
py | 7dfe4aaecc0fe0d1695feadfcfd015f4939fa353 | import inspect
from typing import Type, Iterable, Dict, Union
from django.db import models
from slothql import Field
from slothql.types.object import Object, ObjectMeta, ObjectOptions
from slothql.django.utils.model import get_model_attrs
from .registry import TypeRegistry
class ModelOptions(ObjectOptions):
__slots__ = ('model',)
def __init__(self, attrs: dict):
super().__init__(attrs)
assert self.abstract or self.model, f'"model" is required for object ModelOptions'
class ModelMeta(ObjectMeta):
def __new__(mcs, name, bases, attrs: dict, options_class: Type[ModelOptions] = ModelOptions, **kwargs):
assert 'Meta' in attrs, f'class {name} is missing "Meta" class'
return super().__new__(mcs, name, bases, attrs, options_class, **kwargs)
@classmethod
def get_option_attrs(mcs, base_attrs: dict, attrs: dict, meta_attrs: dict):
fields = meta_attrs.pop('fields', None)
if fields:
model = base_attrs.get('model') or meta_attrs.get('model')
resolved_fields = mcs.get_meta_fields(model, fields)
attrs.update({name: resolved_fields[name] for name in set(resolved_fields) - set(attrs)})
return super().get_option_attrs(base_attrs, attrs, meta_attrs)
@classmethod
def get_meta_fields(mcs, model: Type[models.Model], fields: Union[str, Iterable[str]]) -> Dict[str, Field]:
assert fields == '__all__' or isinstance(fields, Iterable) and all(isinstance(f, str) for f in fields), \
f'Meta.fields needs to be an iterable of field names or "__all__", but received {fields}'
assert model, f'Meta.model is required when using Meta.fields'
assert inspect.isclass(model) and issubclass(model, models.Model), \
f'Meta.model has to be Model class, received {model}'
model_attrs = mcs.get_attrs(model, fields)
for name, field in model_attrs.items():
assert isinstance(field, models.Field), f'"{name}": field cannot be {field}'
return {name: TypeRegistry().get(field) for name, field in model_attrs.items()}
@classmethod
def resolve_attr_list(mcs, model: Type[models.Model], fields: Iterable[str]) -> dict:
attrs = {}
model_attrs = get_model_attrs(model)
for name in fields:
assert name in model_attrs, f'"{name}" is not a valid field for model "{model.__name__}"'
for name, attr in model_attrs.items():
if name not in fields:
continue
assert not isinstance(attr, TypeRegistry.RELATION_TYPES), \
f'"{name}" has to be declared explicitly, to avoid type collisions'
attrs[name] = attr
return attrs
@classmethod
def resolve_all_fields(mcs, model: Type[models.Model]) -> dict:
return {
name: attr for name, attr in get_model_attrs(model).items()
if not isinstance(attr, TypeRegistry.RELATION_TYPES)
}
@classmethod
def get_attrs(mcs, model: Type[models.Model], fields: Union[str, Iterable[str]]) -> dict:
if fields == '__all__':
return mcs.resolve_all_fields(model)
return mcs.resolve_attr_list(model, fields)
class Model(Object, metaclass=ModelMeta):
class Meta:
abstract = True
@classmethod
def resolve(cls, obj, info):
if obj is None:
return cls._meta.model._default_manager.get_queryset()
return obj.get_queryset() if isinstance(obj, models.Manager) else obj
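# A minimal declaration sketch (the Django model and field names are illustrative
# assumptions, not part of this module):
#
#   class UserType(Model):
#       class Meta:
#           model = User                    # a django.db.models.Model subclass
#           fields = ('id', 'username')     # or '__all__' for all non-relation fields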
|
py | 7dfe4b2db04e164e0f0b8f44717ab6d1b94123da |
# For standalone debugging, start the zookeeper and kafka services before running this module.
# To start zookeeper: cd /home/lp/soft/kafka_2.11-1.1.0, then run bin/zookeeper-server-start.sh config/zookeeper.properties (port changed to 2185)
# To start kafka: cd /home/lp/soft/kafka_2.11-1.1.0, then run bin/kafka-server-start.sh config/server.properties (port 9092)
from kafka import KafkaConsumer
from kafka import KafkaClient,SimpleClient
from common.config import *
class kafka_consumer():
def __init__(self,kafka_server=KAFKA_SERVER_IP):
        self.kafka_servers=kafka_server # consumer endpoint of the kafka servers
    # ====== read current data ======
    # When group_id is set, only one consumer instance within the same group reads each message.
    # callback is a callback function invoked for every message; this call blocks.
def read_data_now(self,callback,topic='device',group_id=None,auto_offset_reset='latest'):
if(group_id):
consumer = KafkaConsumer(topic,group_id=group_id,auto_offset_reset=auto_offset_reset,bootstrap_servers=self.kafka_servers)
for message in consumer:
callback(message)
# print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,message.offset, message.key,message.value))
else:
consumer = KafkaConsumer(topic,auto_offset_reset=auto_offset_reset,bootstrap_servers=self.kafka_servers)
for message in consumer:
callback(message)
# print("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition, message.offset, message.key, message.value))
    # # ========== read messages from a specified offset ===============
# from kafka import KafkaConsumer
# from kafka.structs import TopicPartition
#
# consumer = KafkaConsumer('test',bootstrap_servers=['127.0.0.1:9092'])
#
    # print(consumer.partitions_for_topic("test")) # get partition info for the "test" topic
    # print(consumer.topics()) # get the topic list
    # print(consumer.subscription()) # get the topics this consumer subscribes to
    # print(consumer.assignment()) # get this consumer's topic/partition assignment
    # print(consumer.beginning_offsets(consumer.assignment())) # get the beginning offsets for the assigned partitions
    # consumer.seek(TopicPartition(topic='test', partition=0), 5) # reset the offset; consume starting from offset 5
# for message in consumer:
# print ("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,message.offset, message.key,message.value))
    # ======= subscribing to multiple topics ==========
# from kafka import KafkaConsumer
# from kafka.structs import TopicPartition
#
# consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
    # consumer.subscribe(topics=('test','test0')) # subscribe to the topics to consume
# print(consumer.topics())
    # print(consumer.position(TopicPartition(topic='test', partition=0))) # get the latest offset for the current topic partition
# for message in consumer:
# print ("%s:%d:%d: key=%s value=%s" % (message.topic, message.partition,message.offset, message.key,message.value))
    # ========== consumer (manually polling messages) ============
#
# from kafka import KafkaConsumer
# import time
#
# consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
# consumer.subscribe(topics=('test','test0'))
# while True:
    # msg = consumer.poll(timeout_ms=5) # fetch messages from kafka
# print(msg)
# time.sleep(2)
    # ============== pausing and resuming consumption ===========
# from kafka import KafkaConsumer
# from kafka.structs import TopicPartition
# import time
#
# consumer = KafkaConsumer(bootstrap_servers=['127.0.0.1:9092'])
# consumer.subscribe(topics=('test'))
# consumer.topics()
    # consumer.pause(TopicPartition(topic=u'test', partition=0)) # after pause(), the consumer cannot read from this partition until resume() is called.
# num = 0
# while True:
# print(num)
    #     print(consumer.paused()) # get the currently paused partitions
# msg = consumer.poll(timeout_ms=5)
# print(msg)
# time.sleep(2)
# num = num + 1
# if num == 10:
# print("resume...")
# consumer.resume(TopicPartition(topic='test', partition=0))
# print("resume......")
|
py | 7dfe4b6df8f9577bc5311d7d37b3a86be28053e5 | from .detector3d_template import Detector3DTemplate
class PointPillar(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
pred_dicts, recall_dicts = self.post_processing(batch_dict)
if 'vfemap_vis_dict' in batch_dict:
recall_dicts['vfemap_vis_dict'] = batch_dict['vfemap_vis_dict']
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss_rpn, tb_dict = self.dense_head.get_loss()
tb_dict = {
'loss_rpn': loss_rpn.item(),
**tb_dict
}
loss = loss_rpn
return loss, tb_dict, disp_dict
|
py | 7dfe4ba686782decf22179dd237ceb6fcbfe33a3 | import sys
import os
import re
import io
from PIL import Image
import magic
from django.core import urlresolvers
from django.core.files import uploadedfile
from django.conf import settings as django_settings
from . import settings
def get_choices_url_pattern():
"""
Returns the pattern of URL for getting product choices
via AJAX in JS code.
"""
# get 'choices' URL using 'reverse',
# the pattern requires one numeric argument
choices_url = urlresolvers.reverse('content_gallery:choices', args=(0,))
# remove argument (last digits in the URL with '/' optionally)
return re.sub(r'\d+/?$', '', choices_url)
def get_gallery_data_url_pattern():
"""
Returns the pattern of URL for getting data of images
related to the object
"""
# get 'gallery_data' using 'reverse',
# the pattern requires two words and one number as arguments
choices_url = urlresolvers.reverse(
'content_gallery:gallery_data',
args=(
'app_label',
'content_type',
0
)
)
# remove arguments
return re.sub(r'\w+/\w+/\d+/?$', '', choices_url)
def get_admin_new_image_preview_url_pattern():
"""
Returns the pattern of URL for getting data of the image.
Used to get data of new added image.
"""
# get 'gallery_new_image_preview' using 'reverse',
# the pattern requires one numeric argument
preview_url = urlresolvers.reverse(
'admin:gallery_new_image_preview',
args=(0,)
)
# remove argument (last digits in the URL with '/' optionally)
return re.sub(r'\d+/?$', '', preview_url)
def calculate_image_size(size, target_size):
"""
Returns the size of the image after resizing.
The same code is used in PIL.Image.thumbnail
    The result of this function is used by JavaScript
    for the resize effect when changing images.
"""
x, y = size
# if the width is greater than the target
if x > target_size[0]:
# proportionally decrease the height but not less than 1px
y = int(max(y * target_size[0] / x, 1))
x = int(target_size[0]) # set the width to the target width
# if the height is still greater than the target
if y > target_size[1]:
# proportionally decrease the width but not less than 1px
x = int(max(x * target_size[1] / y, 1))
y = int(target_size[1]) # set the height to the target height
return x, y
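# Worked example (values chosen for illustration): a 1200x800 image resized to fit a
# 300x300 box keeps its aspect ratio and comes out as 300x200:
#
#   calculate_image_size((1200, 800), (300, 300))  # -> (300, 200)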
def get_ext(filename):
"""
    Returns the extension of the file name, including the leading dot
"""
name, ext = os.path.splitext(filename)
return ext
def get_name(filename):
"""
    Returns the file name without its extension
"""
name, ext = os.path.splitext(filename)
return name
def create_path(filename):
"""
Returns the path to the file located in the gallery folder
"""
return os.path.join(
django_settings.MEDIA_ROOT,
settings.CONF['path'],
filename
)
def create_url(filename):
"""
Returns the URL of the file located in the gallery folder
"""
# remove slashes to avoid double slashes in the URL
# keep the first slash in the MEDIA_URL
media_url = django_settings.MEDIA_URL.rstrip('/')
gallery_path = settings.CONF['path'].strip('/')
return '/'.join([media_url, gallery_path, filename])
def name_in_db(name):
"""
Returns the name of the file after saving data to the database
Adds the gallery folder to the file name
"""
return os.path.join(settings.CONF['path'], name)
def image_resize(src, dst, size):
"""
    Resizes the image and saves it to the 'dst' (filename or io object)
"""
with Image.open(src) as img:
img.thumbnail(size) # use 'thumbnail' to keep aspect ratio
img.save(dst, img.format)
def create_in_memory_image(image, name, size):
"""
Resizes the image and saves it as InMemoryUploadedFile object
Returns the InMemoryUploadedFile object with the image data
"""
output = io.BytesIO() # create an io object
# resize the image and save it to the io object
image_resize(image, output, size)
# get MIME type of the image
mime = magic.from_buffer(output.getvalue(), mime=True)
# create InMemoryUploadedFile using data from the io
return uploadedfile.InMemoryUploadedFile(output, 'ImageField', name,
mime, sys.getsizeof(output), None)
def create_image_data(image):
"""
Returns a dict with the full-size image
and the small image URLs with sizes
"""
return {
"image": {
"url": image.image_url,
"width": settings.CONF['image_width'],
"height": settings.CONF['image_height']
},
"small_image": {
"url": image.small_image_url,
"width": settings.CONF['small_image_width'],
"height": settings.CONF['small_image_height']
}
}
def create_static_url(url):
"""
Returns a URL to the file located in the static folder
"""
# remove ending slash to avoid double slashes
static = django_settings.STATIC_URL.rstrip("/")
path = "/".join([static, url])
# use obfuscated file in non-DEBUG mode
return get_obfuscated_file(path)
def get_first_image(obj):
"""
Returns the first image related to the object or None
if there is no images. The first image is the image
with the smallest value of the 'position' field.
"""
# get one image ordered by 'position'
images = obj.content_gallery.all().order_by('position')[:1]
# return None if result is empty
if not images:
return None
# return the first image
return images[0]
def get_obfuscated_file(path):
"""
Adds .min to the filename in non-debug mode
"""
if django_settings.DEBUG:
return path
name, ext = os.path.splitext(path)
return "".join([name, ".min", ext])
|
py | 7dfe4d757735c0a220f9122595a59107eeb6262c | # ___________________________________________________________________________
#
# EGRET: Electrical Grid Research and Engineering Tools
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
## helpers for flow verification across dcopf and unit commitment models
from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
from egret.model_library.defn import ApproximationType
from egret.common.log import logger, logging
import collections.abc as abc
import egret.model_library.transmission.branch as libbranch
import pyomo.environ as pyo
import numpy as np
import copy as cp
import math
from enum import Enum
class LazyPTDFTerminationCondition(Enum):
NORMAL = 1
ITERATION_LIMIT = 2
FLOW_VIOLATION = 3
def populate_default_ptdf_options(ptdf_options):
if ptdf_options is None:
ptdf_options = dict()
else:
## get a copy
ptdf_options = cp.deepcopy(ptdf_options)
if 'rel_ptdf_tol' not in ptdf_options:
ptdf_options['rel_ptdf_tol'] = 1.e-6
if 'abs_ptdf_tol' not in ptdf_options:
ptdf_options['abs_ptdf_tol'] = 1.e-10
if 'abs_flow_tol' not in ptdf_options:
ptdf_options['abs_flow_tol'] = 1.e-3
if 'rel_flow_tol' not in ptdf_options:
ptdf_options['rel_flow_tol'] = 1.e-5
if 'lazy_rel_flow_tol' not in ptdf_options:
ptdf_options['lazy_rel_flow_tol'] = -0.01
if 'iteration_limit' not in ptdf_options:
ptdf_options['iteration_limit'] = 100000
if 'lp_iteration_limit' not in ptdf_options:
ptdf_options['lp_iteration_limit'] = 100
if 'max_violations_per_iteration' not in ptdf_options:
ptdf_options['max_violations_per_iteration'] = 5
if 'lazy' not in ptdf_options:
ptdf_options['lazy'] = True
if 'branch_kv_threshold' not in ptdf_options:
ptdf_options['branch_kv_threshold'] = None
if 'kv_threshold_type' not in ptdf_options:
ptdf_options['kv_threshold_type'] = 'one'
if 'pre_lp_iteration_limit' not in ptdf_options:
ptdf_options['pre_lp_iteration_limit'] = 100
if 'active_flow_tol' not in ptdf_options:
ptdf_options['active_flow_tol'] = 50.
if 'lp_cleanup_phase' not in ptdf_options:
ptdf_options['lp_cleanup_phase'] = True
return ptdf_options
def check_and_scale_ptdf_options(ptdf_options, baseMVA):
## scale to base MVA
ptdf_options['abs_ptdf_tol'] /= baseMVA
ptdf_options['abs_flow_tol'] /= baseMVA
ptdf_options['active_flow_tol'] /= baseMVA
## lowercase keyword options
ptdf_options['kv_threshold_type'] = ptdf_options['kv_threshold_type'].lower()
rel_flow_tol = ptdf_options['rel_flow_tol']
abs_flow_tol = ptdf_options['abs_flow_tol']
rel_ptdf_tol = ptdf_options['rel_ptdf_tol']
abs_ptdf_tol = ptdf_options['abs_ptdf_tol']
lazy_rel_flow_tol = ptdf_options['lazy_rel_flow_tol']
max_violations_per_iteration = ptdf_options['max_violations_per_iteration']
if max_violations_per_iteration < 1 or (not isinstance(max_violations_per_iteration, int)):
raise Exception("max_violations_per_iteration must be an integer least 1, max_violations_per_iteration={}".format(max_violations_per_iteration))
if abs_flow_tol < lazy_rel_flow_tol:
raise Exception("abs_flow_tol (when scaled by baseMVA) cannot be less than lazy_flow_tol"
" abs_flow_tol={0}, lazy_rel_flow_tol={1}, baseMVA={2}".format(abs_flow_tol*baseMVA, lazy_rel_flow_tol, baseMVA))
if ptdf_options['kv_threshold_type'] not in ['one', 'both']:
raise Exception("kv_threshold_type must be either 'one' (for at least one end of the line"
" above branch_kv_threshold) or 'both' (for both end of the line above"
" branch_kv_threshold), kv_threshold_type={}".format(ptdf_options['kv_threshold_type']))
if abs_flow_tol < 1e-6:
logger.warning("WARNING: abs_flow_tol={0}, which is below the numeric threshold of most solvers.".format(abs_flow_tol*baseMVA))
if abs_flow_tol < rel_ptdf_tol*10:
logger.warning("WARNING: abs_flow_tol={0}, rel_ptdf_tol={1}, which will likely result in violations. Consider raising abs_flow_tol or lowering rel_ptdf_tol.".format(abs_flow_tol*baseMVA, rel_ptdf_tol))
if rel_ptdf_tol < 1e-6:
logger.warning("WARNING: rel_ptdf_tol={0}, which is low enough it may cause numerical issues in the solver. Consider rasing rel_ptdf_tol.".format(rel_ptdf_tol))
if abs_ptdf_tol < 1e-12:
logger.warning("WARNING: abs_ptdf_tol={0}, which is low enough it may cause numerical issues in the solver. Consider rasing abs_ptdf_tol.".format(abs_ptdf_tol*baseMVA))
class _LazyViolations(abc.Sized):
def __init__(self, branch_lazy_violations,
interface_lazy_violations=None,
contingency_lazy_violations=None):
self._branch_lazy_violations = branch_lazy_violations
if interface_lazy_violations is None:
self._interface_lazy_violations = set()
else:
self._interface_lazy_violations = interface_lazy_violations
if contingency_lazy_violations is None:
self._contingency_lazy_violations = set()
else:
self._contingency_lazy_violations = contingency_lazy_violations
def __len__(self):
return len(self._branch_lazy_violations) \
+ len(self._interface_lazy_violations) \
+ len(self._contingency_lazy_violations)
@property
def branch_lazy_violations(self):
return self._branch_lazy_violations
@property
def interface_lazy_violations(self):
return self._interface_lazy_violations
@property
def contingency_lazy_violations(self):
return self._contingency_lazy_violations
class _CalculatedFlows:
def __init__(self, PFV=None, PFV_I=None):
self._PFV = PFV
self._PFV_I = PFV_I
@property
def PFV(self):
return self._PFV
@property
def PFV_I(self):
return self._PFV_I
class _MaximalViolationsStore:
def __init__(self, max_viol_add, md, prepend_str, time=None):
self.max_viol_add = max_viol_add
self.baseMVA = md.data['system']['baseMVA']
self.time = time
self.prepend_str = prepend_str
self.violations_store = {}
self.total_violations = 0
self.monitored_violations = 0
def get_violations_named(self, name):
for key in self.violations_store:
if key[0] == name:
yield key[1]
def min_flow_violation(self):
if self.violations_store:
return min(self.violations_store.values())
else:
return 0.
def _min_violation_key(self):
d = self.violations_store
return min(d, key=d.get)
def _add_violation(self, name, other_name, index, val):
if other_name:
key = ( name, (other_name, index) )
else:
key = ( name, index )
self.violations_store[key] = val
# keep the violations_store <= self.max_viol_add
if len(self.violations_store) > self.max_viol_add:
min_key = self._min_violation_key()
if min_key == key:
raise RuntimeError(f"Circular condition: added {key} to violations_store "
f"with value {self.violations_store[min_key]} only to delete it. "
f"violations_store: {self.violations_store}")
del self.violations_store[min_key]
def _add_violations( self, name, other_name, viol_array, viol_indices):
while viol_indices:
idx = np.argmax(viol_array[viol_indices])
val = viol_array[viol_indices[idx]]
if val < self.min_flow_violation() and len(self.violations_store) >= self.max_viol_add:
break
# If this violation is close in value to
# one already in the set, it is likely
# to be a parallel constraint.
# If we haven't added any constraints yet
# any(()) is False, so this won't fire
# TODO: since this object gets re-created each iteration,
# and the violations_store just has violations we're
# adding *this* iteration, it's possible to add parallel
# lines, which may not be necessary in may cases (e.g.,
# when the line is binding but not over the limit)
close_to_existing = any( math.isclose( val, existing ) for existing in self.violations_store.values() )
if close_to_existing:
viol_indices.pop(idx)
continue
self._add_violation( name, other_name, viol_indices[idx], val )
viol_indices.pop(idx)
def check_and_add_violations(self, name, flow_array, flow_variable,
upper_lazy_limits, upper_enforced_limits,
lower_lazy_limits, lower_enforced_limits,
monitored_indices, index_names, outer_name=None, PFV=None):
if outer_name:
# contingencies are named by cn, branch_idx, reduce to
# branch_idx for this function
monitored_indices = set(idx[1] for idx in monitored_indices if idx[0] == outer_name)
## check upper bound
upper_viol_lazy_array = flow_array - upper_lazy_limits
## get the indices of the violation
## here filter by least violation in violations_store
## in the limit, this will become 0 eventually --
upper_viol_lazy_idx = np.nonzero(upper_viol_lazy_array > self.min_flow_violation())[0]
upper_viol_array = flow_array[upper_viol_lazy_idx] - upper_enforced_limits[upper_viol_lazy_idx]
self._calculate_total_and_monitored_violations(upper_viol_array, upper_viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, upper_enforced_limits,
name, outer_name, PFV)
## viol_lazy_idx will hold the lines we're adding
## this iteration -- don't want to add lines
## that are already in the monitored set
# eliminate lines in the monitored set
upper_viol_lazy_idx = list(set(upper_viol_lazy_idx).difference(monitored_indices))
self._add_violations( name, outer_name, upper_viol_lazy_array, upper_viol_lazy_idx )
## check lower bound
lower_viol_lazy_array = lower_lazy_limits - flow_array
## get the indices of the violation
## here filter by least violation in violations_store
## in the limit, this will become 0 eventually --
lower_viol_lazy_idx = np.nonzero(lower_viol_lazy_array > self.min_flow_violation())[0]
lower_viol_array = lower_enforced_limits[lower_viol_lazy_idx] - flow_array[lower_viol_lazy_idx]
self._calculate_total_and_monitored_violations(lower_viol_array, lower_viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, lower_enforced_limits,
name, outer_name, PFV)
## viol_lazy_idx will hold the lines we're adding
## this iteration -- don't want to add lines
## that are already in the monitored set
# eliminate lines in the monitored set
lower_viol_lazy_idx = list(set(lower_viol_lazy_idx).difference(monitored_indices))
self._add_violations( name, outer_name, lower_viol_lazy_array, lower_viol_lazy_idx )
def _calculate_total_and_monitored_violations(self, viol_array, viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, limits,
name, outer_name, other_flows ):
## viol_idx_idx will be indexed by viol_lazy_idx
viol_idx_idx = np.nonzero(viol_array > 0)[0]
viol_idx = frozenset(viol_lazy_idx[viol_idx_idx])
self.total_violations += len(viol_idx)
viol_in_mb = viol_idx.intersection(monitored_indices)
self.monitored_violations += len(viol_in_mb)
for i in viol_in_mb:
element_name = index_names[i]
thermal_limit = limits[i]
flow = flow_array[i]
if outer_name:
element_name = (outer_name, element_name)
thermal_limit += other_flows[i]
flow += other_flows[i]
logger.info(self.prepend_str+_generate_flow_viol_warning(flow_variable, name, element_name, flow, thermal_limit, self.baseMVA, self.time))
## useful debugging code
if logger.level <= logging.DEBUG:
for i in monitored_indices:
element_name = index_names[i]
thermal_limit = limits[i]
flow = flow_array[i]
if outer_name:
element_name = (outer_name, element_name)
thermal_limit += other_flows[i]
flow += other_flows[i]
print(f'contingency: {element_name[0]}, branch: {element_name[1]}')
print(f'delta: {flow_array[i]}')
print(f'base : {other_flows[i]}')
print(f'flow : {flow_array[i]+other_flows[i]}')
print(f'model: {pyo.value(flow_variable[element_name])}')
if not math.isclose(pyo.value(flow_variable[element_name]), flow_array[i]+other_flows[i]):
print(f'contingency: {element_name[0]}, branch_idx: {i}')
diff = pyo.value(flow_variable[element_name]) - (flow_array[i]+other_flows[i])
print(f'ABSOLUTE DIFFERENCE: { abs(diff) }')
flow_variable[element_name].pprint()
raise Exception()
print('')
else:
print(f'{name}: {element_name}')
print(f'flow : {flow_array[i]}')
print(f'model: {pyo.value(flow_variable[element_name])}')
print('')
## to hold the indicies of the violations
## in the model or block
def add_monitored_flow_tracker(mb):
mb._idx_monitored = list()
mb._interfaces_monitored = list()
mb._contingencies_monitored = list()
# add these if there are no slacks
# so we don't have to check later
# for these attributes
if not hasattr(mb, 'pf_slack_pos'):
mb.pf_slack_pos = pyo.Var([], dense=False)
if not hasattr(mb, 'pfi_slack_pos'):
mb.pfi_slack_pos = pyo.Var([], dense=False)
if not hasattr(mb, 'pfc_slack_pos'):
mb.pfc_slack_pos = pyo.Var([], dense=False)
## violation checker
def check_violations(mb, md, PTDF, max_viol_add, time=None, prepend_str=""):
if time is None: # DCOPF
active_slack_tol = mb._ptdf_options['active_flow_tol']
else: # Unit Commitment
active_slack_tol = mb.parent_block()._ptdf_options['active_flow_tol']
## PFV -- power flow vector
## PFV_I -- interface power flow vector
## VA -- bus voltage angle vector
PFV, PFV_I, VA = PTDF.calculate_masked_PFV(mb)
violations_store = _MaximalViolationsStore(max_viol_add=max_viol_add, md=md, time=time, prepend_str=prepend_str)
if len(PTDF.branches_keys_masked) > 0:
violations_store.check_and_add_violations('branch', PFV, mb.pf,
PTDF.lazy_branch_limits, PTDF.enforced_branch_limits,
-PTDF.lazy_branch_limits, -PTDF.enforced_branch_limits,
mb._idx_monitored, PTDF.branches_keys_masked)
if len(PTDF.interface_keys) > 0:
violations_store.check_and_add_violations('interface', PFV_I, mb.pfi,
PTDF.lazy_interface_max_limits, PTDF.enforced_interface_max_limits,
PTDF.lazy_interface_min_limits, PTDF.enforced_interface_min_limits,
mb._interfaces_monitored, PTDF.interface_keys)
if PTDF.contingencies and \
violations_store.total_violations == 0:
## NOTE: checking contingency constraints in general could be very expensive
## we probably want to delay doing so until we have a nearly transmission feasible
## solution
## For each contingency, we'll only calculate the difference in flow,
## and check this against the difference in bounds, i.e.,
## power_flow_contingency == PFV + PFV_delta_c
## -rate_c <= power_flow_contingency <= +rate_c
## <===>
## -rate_c - PFV <= PFV_delta_c <= +rate_c - PFV
## <===>
## contingency_limits_lower <= PFV_delta_c <= contingency_limits_upper
## and
## contingency_limits_lower == -rate_c - PFV; contingency_limits_upper == rate_c - PFV
## In this way, we avoid (number of contingenies) adds PFV+PFV_delta_c
logger.debug("Checking contingency flows...")
lazy_contingency_limits_upper = PTDF.lazy_contingency_limits - PFV
lazy_contingency_limits_lower = -PTDF.lazy_contingency_limits - PFV
enforced_contingency_limits_upper = PTDF.enforced_contingency_limits - PFV
enforced_contingency_limits_lower = -PTDF.enforced_contingency_limits - PFV
for cn in PTDF.contingency_compensators:
PFV_delta = PTDF.calculate_masked_PFV_delta(cn, PFV, VA)
violations_store.check_and_add_violations('contingency', PFV_delta, mb.pfc,
lazy_contingency_limits_upper, enforced_contingency_limits_upper,
lazy_contingency_limits_lower, enforced_contingency_limits_lower,
mb._contingencies_monitored, PTDF.branches_keys_masked,
outer_name = cn, PFV = PFV)
logger.debug(f"branches_monitored: {mb._idx_monitored}\n"
f"interfaces_monitored: {mb._interfaces_monitored}\n"
f"contingencies_monitored: {mb._contingencies_monitored}\n"
f"Violations being added: {violations_store.violations_store}\n"
f"Violations in model: {violations_store.monitored_violations}\n")
viol_lazy = _LazyViolations(branch_lazy_violations=set(violations_store.get_violations_named('branch')),
interface_lazy_violations=set(violations_store.get_violations_named('interface')),
contingency_lazy_violations=set(violations_store.get_violations_named('contingency')))
flows = _CalculatedFlows(PFV=PFV, PFV_I=PFV_I)
return flows, violations_store.total_violations, violations_store.monitored_violations, viol_lazy
def _generate_flow_monitor_remove_message(flow_type, bn, slack, baseMVA, time):
ret_str = "removing {0} {1} from monitored set".format(flow_type, bn)
if time is not None:
ret_str += " at time {}".format(time)
ret_str += ", flow slack={0}".format(slack*baseMVA)
return ret_str
## flow constraint remover
def remove_inactive(mb, solver, time=None, prepend_str=""):
if time is None: # DCOPF
model = mb
else: # UC
model = mb.parent_block()
PTDF = mb._PTDF
ptdf_options = model._ptdf_options
baseMVA = model.model_data.data['system']['baseMVA']
slack_tol = ptdf_options['active_flow_tol']
persistent_solver = isinstance(solver, PersistentSolver)
## get the lines we're monitoring
idx_monitored = mb._idx_monitored
interfaces_monitored = mb._interfaces_monitored
contingencies_monitored = mb._contingencies_monitored
## get the branchnname to index map
branchname_index_map = PTDF.branchname_to_index_masked_map
interfacename_index_map = PTDF.interfacename_to_index_map
## branches
branches = model.model_data.data['elements']['branch']
interfaces = model.model_data.data['elements']['interface']
# list of tuples -- each tuple is ( key, indexed_constraint, constraint_data )
constr_to_remove = list()
for bn, constr in mb.ineq_pf_branch_thermal_bounds.items():
## don't take out branches we were told to monitor
if 'lazy' in branches[bn] and not branches[bn]['lazy']:
continue
slack = constr.slack()
if slack_tol <= abs(slack):
logger.debug(prepend_str+_generate_flow_monitor_remove_message('branch', bn, abs(slack), baseMVA, time))
constr_to_remove.append((bn, mb.ineq_pf_branch_thermal_bounds, constr))
## remove the index from the lines we're monitoring
idx_monitored.remove(branchname_index_map[bn])
for i_n, constr in mb.ineq_pf_interface_bounds.items():
## don't take out branches we were told to monitor
if 'lazy' in interfaces[i_n] and not interfaces[i_n]['lazy']:
continue
slack = constr.slack()
if slack_tol <= abs(slack):
logger.debug(prepend_str+_generate_flow_monitor_remove_message('interface', i_n, abs(slack), baseMVA, time))
constr_to_remove.append((i_n, mb.ineq_pf_interface_bounds, constr))
## remove the index from the lines we're monitoring
interfaces_monitored.remove(interfacename_index_map[i_n])
for name, constr in mb.ineq_pf_contingency_branch_thermal_bounds.items():
slack = constr.slack()
if slack_tol <= abs(slack):
            logger.debug(prepend_str+_generate_flow_monitor_remove_message('contingency', name, abs(slack), baseMVA, time))
constr_to_remove.append((name, mb.ineq_pf_contingency_branch_thermal_bounds, constr))
## remove the index from the lines we're monitoring
contingencies_monitored.remove((name[0], branchname_index_map[name[1]])) ## TODO: name?
msg = prepend_str+"removing {} inactive transmission constraint(s)".format(len(constr_to_remove))
if time is not None:
msg += " at time {}".format(time)
logger.debug(msg)
for key, indexed_constraint, constr_data in constr_to_remove:
if persistent_solver:
solver.remove_constraint(constr_data)
del indexed_constraint[key]
return len(constr_to_remove)
def _generate_flow_viol_warning(expr, e_type, bn, flow, limit, baseMVA, time):
ret_str = "WARNING: {0} {1} is in the monitored set".format(e_type,bn)
if time is not None:
ret_str += " at time {}".format(time)
ret_str += ", but flow exceeds limit!!\n\t flow={:.2f}, limit={:.2f}".format(flow*baseMVA, limit*baseMVA)
ret_str += ", model_flow={:.2f}".format(pyo.value(expr[bn])*baseMVA)
return ret_str
def _generate_flow_monitor_message(e_type, bn, flow=None, lower_limit=None, upper_limit=None, baseMVA=None, time=None):
ret_str = "adding {0} {1} to monitored set".format(e_type, bn)
if time is not None:
ret_str += " at time {}".format(time)
if flow is not None:
ret_str += ", flow={0}, lower limit={1}, upper limit={2}".format(flow*baseMVA, lower_limit*baseMVA, upper_limit*baseMVA)
return ret_str
## helper for generating pf
def _iter_over_viol_set(viol_set, mb, PTDF, abs_ptdf_tol, rel_ptdf_tol):
for i in viol_set:
bn = PTDF.branches_keys_masked[i]
if mb.pf[bn].expr is None:
expr = libbranch.get_power_flow_expr_ptdf_approx(mb, bn, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol)
mb.pf[bn] = expr
yield i, bn
## helper for generating pfi
def _iter_over_int_viol_set(int_viol_set, mb, PTDF, abs_ptdf_tol, rel_ptdf_tol):
for i in int_viol_set:
i_n = PTDF.interface_keys[i]
if mb.pfi[i_n].expr is None:
expr = libbranch.get_power_flow_interface_expr_ptdf(mb, i_n, PTDF, abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol)
mb.pfi[i_n] = expr
yield i, i_n
## helper for generating pfc
def _iter_over_cont_viol_set(cont_viol_set, mb, PTDF, abs_ptdf_tol, rel_ptdf_tol):
for (cn, i_b) in cont_viol_set:
bn = PTDF.branches_keys_masked[i_b]
if (cn, bn) not in mb._contingency_set:
mb._contingency_set.add((cn,bn))
if mb.pfc[cn, bn].expr is None:
expr = libbranch.get_contingency_power_flow_expr_ptdf_approx(mb, cn, bn, PTDF,
abs_ptdf_tol=abs_ptdf_tol, rel_ptdf_tol=rel_ptdf_tol)
mb.pfc[cn, bn] = expr
yield cn, bn, i_b
def _generate_branch_thermal_bounds(mb, bn, thermal_limit):
if bn in mb.pf_slack_pos.index_set():
if bn not in mb.pf_slack_pos:
neg_slack = mb.pf_slack_neg[bn]
pos_slack = mb.pf_slack_pos[bn]
assert len(mb.pf_slack_pos) == len(mb.pf_slack_neg)
new_var = True
else: # the constraint could have been added and removed
neg_slack = mb.pf_slack_neg[bn]
pos_slack = mb.pf_slack_pos[bn]
new_var = False
# initialize to 0.
neg_slack.value = 0.
pos_slack.value = 0.
else:
neg_slack = None
pos_slack = None
new_var = False
return libbranch.generate_thermal_bounds(mb.pf[bn], -thermal_limit, thermal_limit, neg_slack, pos_slack), new_var
def _generate_interface_bounds(mb, i_n, minimum_limit, maximum_limit):
if i_n in mb.pfi_slack_pos.index_set():
if i_n not in mb.pfi_slack_pos:
neg_slack = mb.pfi_slack_neg[i_n]
pos_slack = mb.pfi_slack_pos[i_n]
assert len(mb.pfi_slack_pos) == len(mb.pfi_slack_neg)
new_var = True
else: # the constraint could have been added and removed
neg_slack = mb.pfi_slack_neg[i_n]
pos_slack = mb.pfi_slack_pos[i_n]
new_var = False
# initialize to 0.
neg_slack.value = 0.
pos_slack.value = 0.
else:
neg_slack = None
pos_slack = None
new_var = False
return libbranch.generate_thermal_bounds(mb.pfi[i_n], minimum_limit, maximum_limit, neg_slack, pos_slack), new_var
def _generate_contingency_bounds(mb, cn, minimum_limit, maximum_limit):
if cn in mb.pfc_slack_pos.index_set():
if cn not in mb.pfc_slack_pos:
neg_slack = mb.pfc_slack_neg[cn]
pos_slack = mb.pfc_slack_pos[cn]
assert len(mb.pfc_slack_pos) == len(mb.pfc_slack_neg)
new_var = True
else: # the constraint could have been added and removed
neg_slack = mb.pfc_slack_neg[cn]
pos_slack = mb.pfc_slack_pos[cn]
new_var = False
# initialize to 0.
neg_slack.value = 0.
pos_slack.value = 0.
else:
neg_slack = None
pos_slack = None
new_var = False
return libbranch.generate_thermal_bounds(mb.pfc[cn], minimum_limit, maximum_limit, neg_slack, pos_slack), new_var
## violation adder
def add_violations(lazy_violations, flows, mb, md, solver, ptdf_options,
PTDF, time=None, prepend_str="", obj_multi=None):
# obj_multi is for models where this
# UC instance is part of a larger model
if time is None:
model = mb
else:
model = mb.parent_block()
baseMVA = md.data['system']['baseMVA']
persistent_solver = isinstance(solver, PersistentSolver)
## static information between runs
rel_ptdf_tol = ptdf_options['rel_ptdf_tol']
abs_ptdf_tol = ptdf_options['abs_ptdf_tol']
constr = mb.ineq_pf_branch_thermal_bounds
viol_in_mb = mb._idx_monitored
for i, bn in _iter_over_viol_set(lazy_violations.branch_lazy_violations, mb, PTDF, abs_ptdf_tol, rel_ptdf_tol):
thermal_limit = PTDF.branch_limits_array_masked[i]
if flows.PFV is None:
logger.debug(prepend_str+_generate_flow_monitor_message('branch', bn, time=time))
else:
logger.debug(prepend_str+_generate_flow_monitor_message('branch', bn, flows.PFV[i], -thermal_limit, thermal_limit, baseMVA, time))
constr[bn], new_slacks = _generate_branch_thermal_bounds(mb, bn, thermal_limit)
viol_in_mb.append(i)
if new_slacks:
m = model
obj_coef = m.TimePeriodLengthHours*m.BranchLimitPenalty[bn]
if persistent_solver:
m_model = m.model()
if m is not m_model and obj_multi is None:
raise RuntimeError("Cannot add lazy var for branch slacks if part of a larger model")
if obj_multi is not None:
obj_coef = obj_multi*obj_coef
## update the objective through the add_column method
solver.add_column(m_model, mb.pf_slack_pos[bn], obj_coef, [], [])
solver.add_column(m_model, mb.pf_slack_neg[bn], obj_coef, [], [])
else:
m.BranchViolationCost[time].expr += ( obj_coef*mb.pf_slack_pos[bn] + \
obj_coef*mb.pf_slack_neg[bn] )
if persistent_solver:
solver.add_constraint(constr[bn])
_add_interface_violations(lazy_violations, flows, mb, md, solver, ptdf_options,
PTDF, model, baseMVA, persistent_solver, rel_ptdf_tol, abs_ptdf_tol,
time, prepend_str, obj_multi)
_add_contingency_violations(lazy_violations, flows, mb, md, solver, ptdf_options,
PTDF, model, baseMVA, persistent_solver, rel_ptdf_tol, abs_ptdf_tol,
time, prepend_str, obj_multi)
def _add_interface_violations(lazy_violations, flows, mb, md, solver, ptdf_options,
PTDF, model, baseMVA, persistent_solver, rel_ptdf_tol, abs_ptdf_tol,
time, prepend_str, obj_multi):
## in case there's no interfaces
if not hasattr(mb, 'ineq_pf_interface_bounds'):
return
constr = mb.ineq_pf_interface_bounds
int_viol_in_mb = mb._interfaces_monitored
for i, i_n in _iter_over_int_viol_set(lazy_violations.interface_lazy_violations, mb, PTDF, abs_ptdf_tol, rel_ptdf_tol):
minimum_limit = PTDF.interface_min_limits[i]
maximum_limit = PTDF.interface_max_limits[i]
if flows.PFV_I is None:
logger.debug(prepend_str+_generate_flow_monitor_message('interface', i_n, time=time))
else:
logger.debug(prepend_str+_generate_flow_monitor_message('interface', i_n, flows.PFV_I[i], minimum_limit, maximum_limit, baseMVA, time))
constr[i_n], new_slacks = _generate_interface_bounds(mb, i_n, minimum_limit, maximum_limit)
int_viol_in_mb.append(i)
if new_slacks:
m = model
obj_coef = m.TimePeriodLengthHours*m.InterfaceLimitPenalty[i_n]
if persistent_solver:
m_model = m.model()
if m is not m_model and obj_multi is None:
raise RuntimeError("Cannot add lazy var for interface slacks if part of a larger model")
if obj_multi is not None:
obj_coef = obj_multi*obj_coef
## update the objective through the add_column method
solver.add_column(m_model, mb.pfi_slack_pos[i_n], obj_coef, [], [])
solver.add_column(m_model, mb.pfi_slack_neg[i_n], obj_coef, [], [])
else:
m.InterfaceViolationCost[time].expr += (obj_coef*mb.pfi_slack_pos[i_n] + \
obj_coef*mb.pfi_slack_neg[i_n] )
if persistent_solver:
solver.add_constraint(constr[i_n])
def _add_contingency_violations(lazy_violations, flows, mb, md, solver, ptdf_options,
PTDF, model, baseMVA, persistent_solver, rel_ptdf_tol, abs_ptdf_tol,
time, prepend_str, obj_multi):
## in case there's no contingencies
if not hasattr(mb, 'ineq_pf_contingency_branch_thermal_bounds'):
return
constr = mb.ineq_pf_contingency_branch_thermal_bounds
contingencies_monitored = mb._contingencies_monitored
for cn, bn, i_b in _iter_over_cont_viol_set(lazy_violations.contingency_lazy_violations, mb, PTDF, abs_ptdf_tol, rel_ptdf_tol):
emergency_thermal_limit = PTDF.contingency_limits_array_masked[i_b]
logger.debug(prepend_str+_generate_flow_monitor_message('contingency', (cn,bn), time=time))
constr[cn,bn], new_slacks = _generate_contingency_bounds(mb, (cn,bn), -emergency_thermal_limit, emergency_thermal_limit)
contingencies_monitored.append((cn, i_b))
if new_slacks:
m = model
obj_coef = m.TimePeriodLengthHours*m.SystemContingencyLimitPenalty
if persistent_solver:
m_model = m.model()
if m is not m_model and obj_multi is None:
raise RuntimeError("Cannot add lazy var for branch contingency slacks if part of a larger model")
if obj_multi is not None:
obj_coef = obj_multi*obj_coef
## update the objective through the add_column method
solver.add_column(m_model, mb.pfc_slack_pos[cn,bn], obj_coef, [], [])
solver.add_column(m_model, mb.pfc_slack_neg[cn,bn], obj_coef, [], [])
else:
m.ContingencyViolationCost[time].expr += (obj_coef*mb.pfc_slack_pos[cn,bn] + \
obj_coef*mb.pfc_slack_neg[cn,bn] )
if persistent_solver:
solver.add_constraint(constr[cn,bn])
def add_initial_monitored_constraints(mb, md, branches_in_service, ptdf_options, PTDF, time=None):
viol_not_lazy = set()
for bn in branches_in_service:
branch = md.data['elements']['branch'][bn]
if 'lazy' in branch and not branch['lazy']:
if bn in PTDF.branchname_to_index_masked_map:
viol_not_lazy.add(PTDF.branchname_to_index_masked_map[bn])
else:
logger.warning("Branch {0} has flag 'lazy' set to False but is excluded from monitored set based on kV limits".format(bn))
int_viol_not_lazy = set()
if 'interface' in md.data['elements']:
for i_n, interface in md.data['elements']['interface'].items():
if 'lazy' in interface and not interface['lazy']:
int_viol_not_lazy.add(PTDF.interfacename_to_index_map[i_n])
    # not easy to support in the current
    # set-up, as 'lazy' would need to be
    # set on a (contingency, branch) basis
cont_viol_not_lazy = set()
#blank flows
flows = _CalculatedFlows()
lazy_violations = _LazyViolations(branch_lazy_violations=viol_not_lazy,
interface_lazy_violations=int_viol_not_lazy,
contingency_lazy_violations=cont_viol_not_lazy)
add_violations(lazy_violations, flows, mb, md, None,
ptdf_options, PTDF, time=time, prepend_str="[Initial Set] ", obj_multi=None)
def copy_active_to_next_time(m, b_next, PTDF_next, slacks, slacks_I, slacks_C):
active_slack_tol = m._ptdf_options['active_flow_tol']
branchname_index_map = PTDF_next.branchname_to_index_masked_map
interfacename_index_map = PTDF_next.interfacename_to_index_map
viol_lazy = set()
int_viol_lazy = set()
cont_viol_lazy = set()
idx_monitored = b_next._idx_monitored
interfaces_monitored = b_next._interfaces_monitored
contingencies_monitored = b_next._contingencies_monitored
for bn, slack in slacks.items():
if abs(slack) <= active_slack_tol:
## in case the topology has changed
if bn in branchname_index_map:
idx = branchname_index_map[bn]
if idx not in idx_monitored:
viol_lazy.add(idx)
for i_n, slack in slacks_I.items():
if abs(slack) <= active_slack_tol:
## in case the topology has changed
if i_n in interfacename_index_map:
idx = interfacename_index_map[i_n]
if idx not in interfaces_monitored:
int_viol_lazy.add(idx)
for cn, slack in slacks_C.items():
if abs(slack) <= active_slack_tol:
## in case the topology has changed
c, bn = cn
if bn in branchname_index_map:
bi = branchname_index_map[bn]
if (c, bi) not in contingencies_monitored:
cont_viol_lazy.add((c, bi))
flows = _CalculatedFlows()
viol_lazy = _LazyViolations(branch_lazy_violations=viol_lazy,
interface_lazy_violations=int_viol_lazy,
contingency_lazy_violations=cont_viol_lazy)
return flows, viol_lazy
def _binary_var_generator(instance):
regulation = bool(instance.regulation_service)
if instance.status_vars in ['CA_1bin_vars', 'garver_3bin_vars', 'garver_2bin_vars', 'garver_3bin_relaxed_stop_vars']:
yield instance.UnitOn
if instance.status_vars in ['ALS_state_transition_vars']:
yield instance.UnitStayOn
if instance.status_vars in ['garver_3bin_vars', 'garver_2bin_vars', 'garver_3bin_relaxed_stop_vars', 'ALS_state_transition_vars']:
yield instance.UnitStart
if instance.status_vars in ['garver_3bin_vars', 'ALS_state_transition_vars']:
yield instance.UnitStop
if regulation:
yield instance.RegulationOn
yield instance.OutputStorage
yield instance.InputStorage
if instance.startup_costs in ['KOW_startup_costs']:
yield instance.StartupIndicator
elif instance.startup_costs in ['MLR_startup_costs', 'MLR_startup_costs2',]:
yield instance.delta
def uc_instance_binary_relaxer(model, solver):
persistent_solver = isinstance(solver, PersistentSolver)
for ivar in _binary_var_generator(model):
ivar.domain = pyo.UnitInterval
if persistent_solver:
for k in ivar:
solver.update_var(ivar[k])
def uc_instance_binary_enforcer(model, solver):
persistent_solver = isinstance(solver, PersistentSolver)
for ivar in _binary_var_generator(model):
ivar.domain = pyo.Binary
if persistent_solver:
for k in ivar:
solver.update_var(ivar[k])
def _load_pf_slacks(solver, m, t_subset):
## ensure the slack variables are loaded
vars_to_load = []
for t in t_subset:
b = m.TransmissionBlock[t]
vars_to_load.extend(b.pf_slack_pos.values())
vars_to_load.extend(b.pf_slack_neg.values())
vars_to_load.extend(b.pfi_slack_pos.values())
vars_to_load.extend(b.pfi_slack_neg.values())
vars_to_load.extend(b.pfc_slack_pos.values())
vars_to_load.extend(b.pfc_slack_neg.values())
# XpressPersistent raises an exception if
# this list is empty
if vars_to_load:
solver.load_vars(vars_to_load)
|
py | 7dfe4d809febd4292d60d4cc0e8ac1219964524c | # -*- coding: utf-8 -*-
"""
const
~~~~~
Implements CONSTS
:author: Feei <[email protected]>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
# Match-Mode
mm_function_param_controllable = 'function-param-regex' # function parameter regex match
mm_regex_param_controllable = 'vustomize-match' # customized match
mm_regex_only_match = 'only-regex'
mm_regex_return_regex = 'regex-return-regex'
match_modes = [
mm_regex_only_match,
mm_regex_param_controllable,
mm_function_param_controllable,
mm_regex_return_regex,
]
#
# Function-Param-Controllable
#
# (?:eval|call_function)\s*\((.*)(?:\))
# eval ($test + $test2);
# call_function ($exp);
#
fpc = '\s*\((.*)(?:\))'
fpc_single = '[f]{fpc}'.format(fpc=fpc)
fpc_multi = '(?:[f]){fpc}'.format(fpc=fpc)
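# Illustrative sketch (an assumption, not part of the original module): the
# '[f]' token above looks like a placeholder meant to be substituted with
# concrete function names before the pattern is compiled, e.g.:
#
#   import re
#   pattern = fpc_single.replace('[f]', r'(?:eval|call_function)')
#   re.findall(pattern, "eval ($test + $test2);")  # -> ['$test + $test2']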
#
# Find All variables
#
# Hallo $var. blabla $var, $iam a var $varvarvar gfg djf jdfgjh fd $variable $_GET['req']
#
fav = '\$([a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*)'
|
py | 7dfe4e4c8c706b7cab093612183475a6a2a4f673 | # Generated by Django 3.2.2 on 2021-06-03 16:51
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0007_auto_20210531_1547'),
]
operations = [
migrations.AddField(
model_name='user',
name='is_super',
field=models.BooleanField(default=False, verbose_name='是否是管理员'),
),
migrations.AlterField(
model_name='user',
name='birthday',
field=models.DateField(blank=True, default=datetime.date(2021, 6, 3), verbose_name='生日'),
),
migrations.AlterField(
model_name='user',
name='credit_rating',
field=models.CharField(choices=[('5', 'AAA'), ('1', 'C'), ('3', 'A'), ('4', 'AA'), ('2', 'B')], default='3', max_length=10, verbose_name='信用等级'),
),
]
|
py | 7dfe4ea10ee7fa9fa56f4b1bd564d88362c55247 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import logging
import torch
from schema import And, Optional
from nni.compression.torch.utils.config_validation import CompressorSchema
from nni.compression.torch.compressor import Pruner
from .finegrained_pruning import LevelPrunerMasker
logger = logging.getLogger('torch pruner')
class LotteryTicketPruner(Pruner):
"""
Parameters
----------
model : pytorch model
The model to be pruned
config_list : list
Supported keys:
- prune_iterations : The number of rounds for the iterative pruning.
- sparsity : The final sparsity when the compression is done.
optimizer : pytorch optimizer
The optimizer for the model
lr_scheduler : pytorch lr scheduler
The lr scheduler for the model if used
reset_weights : bool
Whether reset weights and optimizer at the beginning of each round.
"""
def __init__(self, model, config_list, optimizer=None, lr_scheduler=None, reset_weights=True):
# save init weights and optimizer
self.reset_weights = reset_weights
if self.reset_weights:
self._model = model
self._optimizer = optimizer
self._model_state = copy.deepcopy(model.state_dict())
self._optimizer_state = copy.deepcopy(optimizer.state_dict())
self._lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self._scheduler_state = copy.deepcopy(lr_scheduler.state_dict())
super().__init__(model, config_list, optimizer)
self.curr_prune_iteration = None
self.prune_iterations = config_list[0]['prune_iterations']
self.masker = LevelPrunerMasker(model, self)
def validate_config(self, model, config_list):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
- prune_iterations : The number of rounds for the iterative pruning.
- sparsity : The final sparsity when the compression is done.
"""
schema = CompressorSchema([{
'sparsity': And(float, lambda n: 0 < n < 1),
'prune_iterations': And(int, lambda n: n > 0),
Optional('op_types'): [str],
Optional('op_names'): [str]
}], model, logger)
schema.validate(config_list)
assert len(set([x['prune_iterations'] for x in config_list])) == 1, 'The values of prune_iterations must be equal in your config'
def _calc_sparsity(self, sparsity):
keep_ratio_once = (1 - sparsity) ** (1 / self.prune_iterations)
curr_keep_ratio = keep_ratio_once ** self.curr_prune_iteration
return max(1 - curr_keep_ratio, 0)
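    # Worked example of the schedule above (illustrative comment only): with a
    # target sparsity of 0.8 and prune_iterations = 5, keep_ratio_once is
    # 0.2 ** (1 / 5) ~= 0.7248, so the per-round sparsity grows roughly as
    # 0.275, 0.475, 0.619, 0.724, 0.800 over rounds 1..5, hitting the target
    # exactly on the final round.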
def _calc_mask(self, wrapper, sparsity):
weight = wrapper.module.weight.data
if self.curr_prune_iteration == 0:
mask = {'weight_mask': torch.ones(weight.shape).type_as(weight)}
else:
curr_sparsity = self._calc_sparsity(sparsity)
mask = self.masker.calc_mask(sparsity=curr_sparsity, wrapper=wrapper)
return mask
def calc_mask(self, wrapper, **kwargs):
"""
Generate mask for the given ``weight``.
Parameters
----------
wrapper : Module
The layer to be pruned
Returns
-------
tensor
The mask for this weight, it is ```None``` because this pruner
calculates and assigns masks in ```prune_iteration_start```,
no need to do anything in this function.
"""
return None
def get_prune_iterations(self):
"""
Return the range for iterations.
In the first prune iteration, masks are all one, thus, add one more iteration
Returns
-------
list
A list for pruning iterations
"""
return range(self.prune_iterations + 1)
def prune_iteration_start(self):
"""
Control the pruning procedure on updated epoch number.
Should be called at the beginning of the epoch.
"""
if self.curr_prune_iteration is None:
self.curr_prune_iteration = 0
else:
self.curr_prune_iteration += 1
assert self.curr_prune_iteration < self.prune_iterations + 1, 'Exceed the configured prune_iterations'
modules_wrapper = self.get_modules_wrapper()
modules_to_compress = self.get_modules_to_compress()
for layer, config in modules_to_compress:
module_wrapper = None
for wrapper in modules_wrapper:
if wrapper.name == layer.name:
module_wrapper = wrapper
break
assert module_wrapper is not None
sparsity = config.get('sparsity')
mask = self._calc_mask(module_wrapper, sparsity)
# TODO: directly use weight_mask is not good
module_wrapper.weight_mask = mask['weight_mask']
# there is no mask for bias
# reinit weights back to original after new masks are generated
if self.reset_weights:
# should use this member function to reset model weights
self.load_model_state_dict(self._model_state)
self._optimizer.load_state_dict(self._optimizer_state)
if self._lr_scheduler is not None:
self._lr_scheduler.load_state_dict(self._scheduler_state)
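# --- Hypothetical usage sketch (not part of the original file) ---
# Based on the docstrings above, a round-based lottery-ticket run would look
# roughly like the following; `train_one_round` is a placeholder for the
# caller's own training loop, and `compress()` is assumed from the NNI base
# Pruner API:
#
#   config_list = [{'prune_iterations': 5, 'sparsity': 0.8, 'op_types': ['default']}]
#   pruner = LotteryTicketPruner(model, config_list, optimizer)
#   pruner.compress()
#   for _ in pruner.get_prune_iterations():
#       pruner.prune_iteration_start()
#       train_one_round(model, optimizer)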
|
py | 7dfe502f2d993e45e75b94f90c568b71c97b12fb | # encoding: UTF-8
import sys
import os
import pickle
import bz2
from functools import lru_cache
from logging import INFO, ERROR
from vnpy.trader.utility import load_json, save_json
# Futures configuration file
TDX_FUTURE_CONFIG = 'tdx_future_config.json'
# Stock configuration file
# Storage format: dict{
# "cache_time": datetime,
# "symbol_dict": {
# "symbol_marketid": {
# 'code', '395001',
# 'volunit', 100,
# 'decimal_point', 2,
# 'name', '主板A股',
# 'pre_close', 458.0,
# 'exchagne','SZSE',
# 'stock_type', 'index_cn',
# 'market_id', 0
# }
# } }
TDX_STOCK_CONFIG = 'tdx_stock_config.pkb2'
TDX_PROXY_CONFIG = 'tdx_proxy_config.json'
@lru_cache()
def get_tdx_market_code(code):
    # Get the TDX (TongDaXin) market code for a stock
code = str(code)
if code[0] in ['5', '6', '9'] or code[:3] in ["880","009", "126", "110", "201", "202", "203", "204"]:
        # Shanghai Stock Exchange
return 1
    # Shenzhen Stock Exchange
return 0
# TDX K-line (bar) types
# 0 - 5-minute bars
# 1 - 15-minute bars
# 2 - 30-minute bars
# 3 - 1-hour bars
# 4 - daily bars
# 5 - weekly bars
# 6 - monthly bars
# 7 - 1-minute
# 8 - 1-minute bars
# 9 - daily bars
# 10 - quarterly bars
# 11 - yearly bars
PERIOD_MAPPING = {}
PERIOD_MAPPING['1min'] = 8
PERIOD_MAPPING['5min'] = 0
PERIOD_MAPPING['15min'] = 1
PERIOD_MAPPING['30min'] = 2
PERIOD_MAPPING['1hour'] = 3
PERIOD_MAPPING['1day'] = 4
PERIOD_MAPPING['1week'] = 5
PERIOD_MAPPING['1month'] = 6
# List of futures market data servers
TDX_FUTURE_HOSTS =[
{'ip': '42.193.151.197', 'port': 7727, 'name': '广州期货双线1', 'speed': 6.622},
{'ip': '119.29.63.178', 'port': 7727, 'name': '广州期货双线3', 'speed': 7.716},
{'ip': '81.71.76.101', 'port': 7727, 'name': '广州期货双线2', 'speed': 14.914},
{'ip': '47.107.75.159', 'port': 7727, 'name': '扩展市场深圳双线3', 'speed': 34.542},
{'ip': '112.74.214.43', 'port': 7727, 'name': '扩展市场深圳双线1', 'speed': 37.881},
{'ip': '59.175.238.38', 'port': 7727, 'name': '扩展市场武汉主站3', 'speed': 49.63},
{'ip': '119.97.185.5', 'port': 7727, 'name': '扩展市场武汉主站1', 'speed': 70.563},
{'ip': '218.80.248.229', 'port': 7721, 'name': '备用服务器1', 'speed': 86.91300000000001},
{'ip': '119.97.185.7', 'port': 7727, 'name': '港股期货武汉主站1', 'speed': 101.06099999999999},
{'ip': '106.14.95.149', 'port': 7727, 'name': '扩展市场上海双线', 'speed': 105.294},
{'ip': '113.105.142.136', 'port': 443, 'name': '扩展市场东莞主站', 'speed': 10000.0},
{'ip': '113.105.142.133', 'port': 443, 'name': '港股期货东莞电信', 'speed': 10000.0},
{'ip': '119.97.185.9', 'port': 7727, 'name': '港股期货武汉主站2', 'speed': 10000.0},
{'ip': '202.103.36.71', 'port': 443, 'name': '扩展市场武汉主站2', 'speed': 10000.0},
{'ip': '47.92.127.181', 'port': 7727, 'name': '扩展市场北京主站', 'speed': 10000.0},
{'ip': '124.74.236.94', 'port': 7721, 'name': '备用服务器2', 'speed': 10000.0},
{'ip': '58.246.109.27', 'port': 7721, 'name': '备用服务器3', 'speed': 10000.0}
]
def get_future_contracts():
"""获取期货合约信息"""
return get_cache_json('future_contracts.json')
def save_future_contracts(future_contracts_dict: dict):
"""保存期货合约信息"""
save_cache_json(future_contracts_dict, 'future_contracts.json')
def get_cache_config(config_file_name):
"""获取本地缓存的配置地址信息"""
config_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), config_file_name))
config = {}
if not os.path.exists(config_file_name):
return config
try:
with bz2.BZ2File(config_file_name, 'rb') as f:
config = pickle.load(f)
return config
except Exception as ex:
        print(f'Failed to read local cache file {config_file_name}: {str(ex)}')
return config
def save_cache_config(data: dict, config_file_name):
"""保存本地缓存的配置地址信息"""
config_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), config_file_name))
with bz2.BZ2File(config_file_name, 'wb') as f:
pickle.dump(data, f)
def get_cache_json(json_file_name: str):
"""获取本地缓存的json配置信息"""
config_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), json_file_name))
return load_json(config_file_name)
def save_cache_json(data_dict: dict, json_file_name: str):
"""保存本地缓存的JSON配置信息"""
config_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), json_file_name))
save_json(filename=config_file_name, data=data_dict)
def get_stock_type(code,market_id = None ):
"""获取股票得分类"""
if market_id is None:
market_id = get_tdx_market_code(code)
if market_id == 0:
return get_stock_type_sz(code)
else:
return get_stock_type_sh(code)
def get_stock_type_sz(code):
"""深市代码分类
Arguments:
code {[type]} -- [description]
Returns:
[type] -- [description]
"""
if str(code)[0:2] in ['00', '30', '02']:
return 'stock_cn'
elif str(code)[0:2] in ['39']:
return 'index_cn'
elif str(code)[0:2] in ['15']:
return 'etf_cn'
elif str(code)[0:2] in ['10', '11', '13']:
        # 10xxxx treasury bond spot
        # 11xxxx bonds
        # 12xxxx treasury bond repo
return 'bond_cn'
elif str(code)[0:2] in ['12']:
        # 12xxxx convertible bonds
return 'cb_cn'
elif str(code)[0:2] in ['20']:
return 'stockB_cn'
else:
return 'undefined'
def get_stock_type_sh(code):
if str(code)[0] == '6':
return 'stock_cn'
elif str(code)[0:3] in ['000', '880']:
return 'index_cn'
elif str(code)[0:2] == '51':
return 'etf_cn'
    # 110xxx / 120xxx corporate bonds;
    # 129xxx / 100xxx convertible bonds;
elif str(code)[0:3] in ["009", "112", '120', "132", "204"]:
return 'bond_cn'
elif str(code)[0:3] in ["110", "113", "121", "122", "126",
"130", "181", "190", "191", "192", "201", "202", "203"]:
return 'cb_cn'
else:
return 'undefined'
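# Illustrative results of the classification rules above (comment only; the
# values follow directly from get_tdx_market_code / get_stock_type_sz /
# get_stock_type_sh):
#   get_stock_type('600036')  -> market 1 (Shanghai),  'stock_cn'
#   get_stock_type('399001')  -> market 0 (Shenzhen),  'index_cn'
#   get_stock_type('510050')  -> market 1 (Shanghai),  'etf_cn'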
class FakeStrategy(object):
"""制作一个假得策略,用于测试"""
def write_log(self, content, level=INFO):
if level == INFO:
print(content)
else:
print(content, file=sys.stderr)
def write_error(self, content):
self.write_log(content, level=ERROR)
def display_bar(self, bar, bar_is_completed=True, freq=1):
print(u'{} {}'.format(bar.vt_symbol, bar.datetime))
|
py | 7dfe505482baae09760663b447000644ea43a3d4 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates new GCE VMs with specified zone, machine type and boot image."""
def generate_config(context):
"""Generate Deployment Manager configuration."""
resources = []
for vm in context.properties['gce_instances']:
vm_name = vm['name']
zone = vm['zone']
machine_type = 'zones/{}/machineTypes/{}'.format(zone, vm['machine_type'])
boot_image = vm['boot_image_name']
# Create a new VM.
vm_resource = {
'name': vm_name,
'type': 'compute.v1.instance',
'properties': {
'zone': zone,
'machineType': machine_type,
'disks': [{
'deviceName': 'boot',
'type': 'PERSISTENT',
'boot': True,
'autoDelete': True,
'initializeParams': {
'sourceImage': boot_image,
},
}],
'networkInterfaces': [{
'network': 'global/networks/default',
'accessConfigs': [{
'name': 'External NAT',
'type': 'ONE_TO_ONE_NAT',
}],
}]
},
}
metadata = vm.get('metadata')
if metadata:
vm_resource['properties']['metadata'] = metadata
resources.append(vm_resource)
# After the VM is created, shut it down (if start_vm is False).
if not vm['start_vm']:
resources.append({
'name': 'stop-' + vm_name,
'action': 'gcp-types/compute-v1:compute.instances.stop',
'properties': {
'instance': vm_name,
'zone': zone,
},
'metadata': {
'dependsOn': [vm_name],
'runtimePolicy': ['CREATE'],
},
})
# Create firewall rules (if any).
for rule in context.properties.get('firewall_rules'):
name = rule.pop('name')
resources.append({
'name': name,
'type': 'compute.v1.firewall',
'properties': rule
})
return {'resources': resources}
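# Illustrative shape of `context.properties` consumed above (an assumption
# reconstructed from the keys read in generate_config; all concrete values
# below are made up):
#
#   {
#       'gce_instances': [{
#           'name': 'example-vm',
#           'zone': 'us-central1-a',
#           'machine_type': 'n1-standard-1',
#           'boot_image_name': 'projects/debian-cloud/global/images/family/debian-11',
#           'start_vm': False,
#           'metadata': {'items': [{'key': 'startup-script', 'value': 'echo hello'}]},
#       }],
#       'firewall_rules': [{
#           'name': 'allow-ssh',
#           'allowed': [{'IPProtocol': 'tcp', 'ports': ['22']}],
#           'sourceRanges': ['0.0.0.0/0'],
#       }],
#   }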
|
py | 7dfe505691a6af02ab24f35f41670d56847ee686 | from os.path import join
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from pythonforandroid.toolchain import current_directory
class Pygame2Recipe(CompiledComponentsPythonRecipe):
"""
Recipe to build apps based on SDL2-based pygame.
.. warning:: Some pygame functionality is still untested, and some
dependencies like freetype, postmidi and libjpeg are currently
not part of the build. It's usable, but not complete.
"""
version = '2.0.0-dev7'
url = 'https://github.com/pygame/pygame/archive/android-{version}.tar.gz'
site_packages_name = 'pygame'
name = 'pygame'
depends = ['sdl2', 'sdl2_image', 'sdl2_mixer', 'sdl2_ttf', 'setuptools', 'jpeg', 'png']
call_hostpython_via_targetpython = False # Due to setuptools
install_in_hostpython = False
def prebuild_arch(self, arch):
super().prebuild_arch(arch)
with current_directory(self.get_build_dir(arch.arch)):
setup_template = open(join("buildconfig", "Setup.Android.SDL2.in")).read()
env = self.get_recipe_env(arch)
env['ANDROID_ROOT'] = join(self.ctx.ndk_platform, 'usr')
ndk_lib_dir = join(self.ctx.ndk_platform, 'usr', 'lib')
png = self.get_recipe('png', self.ctx)
png_lib_dir = join(png.get_build_dir(arch.arch), '.libs')
png_inc_dir = png.get_build_dir(arch)
jpeg = self.get_recipe('jpeg', self.ctx)
jpeg_inc_dir = jpeg_lib_dir = jpeg.get_build_dir(arch.arch)
setup_file = setup_template.format(
sdl_includes=(
" -I" + join(self.ctx.bootstrap.build_dir, 'jni', 'SDL', 'include') +
" -L" + join(self.ctx.bootstrap.build_dir, "libs", str(arch)) +
" -L" + png_lib_dir + " -L" + jpeg_lib_dir + " -L" + ndk_lib_dir),
sdl_ttf_includes="-I"+join(self.ctx.bootstrap.build_dir, 'jni', 'SDL2_ttf'),
sdl_image_includes="-I"+join(self.ctx.bootstrap.build_dir, 'jni', 'SDL2_image'),
sdl_mixer_includes="-I"+join(self.ctx.bootstrap.build_dir, 'jni', 'SDL2_mixer'),
jpeg_includes="-I"+jpeg_inc_dir,
png_includes="-I"+png_inc_dir,
freetype_includes=""
)
open("Setup", "w").write(setup_file)
def get_recipe_env(self, arch):
env = super().get_recipe_env(arch)
env['USE_SDL2'] = '1'
env["PYGAME_CROSS_COMPILE"] = "TRUE"
env["PYGAME_ANDROID"] = "TRUE"
return env
recipe = Pygame2Recipe()
|
py | 7dfe52822f3e0b168e9b46e25ba979c37785f1c8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Defines message types for the Receiver class.
#
# This module inspects the EWrapper class to build a set of Message
# types. In creating the types, it also builds a registry of them
# that the Receiver class then uses to determine message types.
##
import sys
from ast import NodeVisitor, parse
from inspect import getsourcefile
from re import match
from ib.ext.AnyWrapper import AnyWrapper
from ib.ext.EWrapper import EWrapper
from ib.ext.EClientSocket import EClientSocket
from ib.lib import toTypeName
class SignatureAccumulator(NodeVisitor):
"""
"""
def __init__(self, classes):
NodeVisitor.__init__(self)
self.signatures = []
for filename in (getsourcefile(cls) for cls in classes):
self.visit(parse(open(filename).read()))
def visit_FunctionDef(self, node):
if sys.version_info[0] < 3:
args = [arg.id for arg in node.args.args]
else:
args = [arg.arg for arg in node.args.args]
self.signatures.append((node.name, args[1:]))
class EClientSocketAccumulator(SignatureAccumulator):
def getSignatures(self):
for name, args in self.signatures:
if match('(?i)req|cancel|place', name):
yield (name, args)
class EWrapperAccumulator(SignatureAccumulator):
def getSignatures(self):
for name, args in self.signatures:
if match('(?!((?i)error.*))', name):
yield (name, args)
##
# Dictionary that associates wrapper method names to the message class
# that should be instantiated for delivery during that method call.
registry = {}
def messageTypeNames():
""" Builds set of message type names.
@return set of all message type names as strings
"""
def typeNames():
for types in list(registry.values()):
for typ in types:
yield typ.typeName
return set(typeNames())
class Message(object):
""" Base class for Message types.
"""
__slots__ = ()
def __init__(self, **kwds):
""" Constructor.
@param **kwds keywords and values for instance
"""
for name in self.__slots__:
setattr(self, name, kwds.pop(name, None))
assert not kwds
def __len__(self):
""" x.__len__() <==> len(x)
"""
return len(list(self.keys()))
def __str__(self):
""" x.__str__() <==> str(x)
"""
name = self.typeName
items = str.join(', ', ['%s=%s' % item for item in list(self.items())])
return '<%s%s>' % (name, (' ' + items) if items else '')
def items(self):
""" List of message (slot, slot value) pairs, as 2-tuples.
@return list of 2-tuples, each slot (name, value)
"""
return list(zip(list(self.keys()), list(self.values())))
def values(self):
""" List of instance slot values.
@return list of each slot value
"""
return [getattr(self, key, None) for key in list(self.keys())]
def keys(self):
""" List of instance slots.
@return list of each slot.
"""
return self.__slots__
class Error(Message):
""" Specialized message type.
The error family of method calls can't be built programmatically,
so we define one here.
"""
__slots__ = ('id', 'errorCode', 'errorMsg')
def buildMessageRegistry(seq, suffixes=[''], bases=(Message, )):
""" Construct message types and add to given mapping.
@param seq pairs of method (name, arguments)
@param bases sequence of base classes for message types
@return None
"""
for name, args in sorted(seq):
for suffix in suffixes:
typename = toTypeName(name) + suffix
typens = {'__slots__':args, '__assoc__':name, 'typeName':name}
msgtype = type(typename, bases, typens)
if name in registry:
registry[name] = registry[name] + (msgtype, )
else:
registry[name] = (msgtype, )
eWrapperAccum = EWrapperAccumulator((AnyWrapper, EWrapper))
eClientAccum = EClientSocketAccumulator((EClientSocket, ))
wrapperMethods = list(eWrapperAccum.getSignatures())
clientSocketMethods = list(eClientAccum.getSignatures())
errorMethods = [('error', Error.__slots__), ]
buildMessageRegistry(wrapperMethods)
buildMessageRegistry(clientSocketMethods, suffixes=('Pre', 'Post'))
buildMessageRegistry(errorMethods)
def initModule():
target = globals()
for messageTypes in list(registry.values()):
for messageType in messageTypes:
target[messageType.typeName] = messageType
try:
initModule()
except (NameError, ):
pass
else:
del(initModule)
del(AnyWrapper)
del(EWrapper)
del(EClientSocket)
del(eWrapperAccum)
del(eClientAccum)
|
py | 7dfe52826120b42fba1133cefcb4a8406afe998b | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner import BaseModule
class BaseOP(BaseModule):
"""Base class for searchable operations.
Args:
in_channels (int): The input channels of the operation.
out_channels (int): The output channels of the operation.
stride (int): Stride of the operation. Defaults to 1.
"""
def __init__(self, in_channels, out_channels, stride=1, **kwargs):
super(BaseOP, self).__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
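# Hypothetical subclass sketch (illustrative only, not part of the original
# file): a concrete searchable op would build its layers in __init__ and
# implement forward(), e.g. a trivial identity op:
#
#   class IdentityOP(BaseOP):
#       def __init__(self, **kwargs):
#           super(IdentityOP, self).__init__(**kwargs)
#           assert self.in_channels == self.out_channels and self.stride == 1
#
#       def forward(self, x):
#           return x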
|
py | 7dfe529ce050f5c171e7dda249f4586a1406dea5 | from utils import *
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath)
OBS_TABLE = OUTPUT_TABLES_LOCATION +"OBSERVATION_FACT.csv"
def fill_nulls():
"""
Fill the mandatory keys in the observation table with default values.
Typically,
Replace the None values in "ENCOUNTER_NUM" by -1,
Replace the None values in "START_DATE" by the default date specified in the config file.
"""
df = pd.read_csv(OBS_TABLE)
df = df.fillna({"ENCOUNTER_NUM":"-1", "START_DATE":DEFAULT_DATE, "PROVIDER_ID":"@"})
df.fillna("").to_csv(OBS_TABLE, index=False)
def reindex():
"""
Replace encounter numbers and patient numbers with integer ones.
In a proper way, the reindexing should yield only distinct encounter numbers, including for originally-NaN values.
For now, it keeps the NaNs in place. They are replaced by a -1 in the fill_nulls() function.
"""
    def new_id(key, col):
        # position of the original identifier in the lookup table (row 0 is the padding row)
        tmp = lookup[lookup[col] == key].index.values[0]
        return tmp if tmp > 0 else -1
df = pd.read_csv(OBS_TABLE)
encs = pd.Series(df["ENCOUNTER_NUM"].dropna().unique())
pats = pd.Series(df["PATIENT_NUM"].dropna().unique())
df = df.fillna({"ENCOUNTER_NUM":""})
lookup = pd.DataFrame({"ENCOUNTER_NUM":encs, "PATIENT_NUM":pats})
padding = pd.DataFrame([[""]*len(lookup.columns)], columns=lookup.columns)
lookup = padding.append(lookup, ignore_index=True)
# todo: swap using the lookup
for col in ["ENCOUNTER_NUM", "PATIENT_NUM"]:
print("Reindexing ", col)
gpd = df.groupby(col)
counter=0
for key, subdf in gpd.groups.items():
counter = counter+1
if counter%100==0:
print(" Reindexed", counter, "groups out of ", len(gpd.groups.keys()))
df.loc[subdf, col] = new_id(key, col)
#df["ENCOUNTER_NUM"]=df.apply(lambda row: new_id(row, "ENCOUNTER_NUM"), axis=1)
#print("Reindexing patient numbers")
#df["PATIENT_NUM"]=df.apply(lambda row: new_id(row, "PATIENT_NUM"), axis=1)
df["TEXT_SEARCH_INDEX"] = df.index.values+1
df.to_csv(OBS_TABLE, index=False)
return lookup
def check_basecodes(stop=False):
df = pd.read_csv(OBS_TABLE)
conc = pd.Series(df["CONCEPT_CD"].unique())
mod = pd.Series(df["MODIFIER_CD"].unique())
mod = mod[~mod.isin(["@"])]
mod_dim = pd.read_csv(OUTPUT_TABLES_LOCATION+"MODIFIER_DIMENSION.csv")["MODIFIER_CD"]
conc_dim = pd.read_csv(OUTPUT_TABLES_LOCATION+"CONCEPT_DIMENSION.csv")["CONCEPT_CD"]
if stop:
print("Some concepts or modifiers are not in the ontology. \nTake a look at the \"missing_concepts\" and \"missing_modifiers\" variables. \\\
\nYou can access the tail of modifiers (typically showing the terminology codes) through the \"tail_modifiers\" variable")
missing_concepts = conc[~conc.isin(conc_dim)]
missing_modifiers = mod[~mod.isin(mod_dim)]
        # mod is a pandas Series, so take the tail of each modifier individually
        tail_modifiers = mod.apply(lambda m: m[m.rfind("\\"):])
pd.set_option('display.max_colwidth', None)
pdb.set_trace()
return all(conc.isin(conc_dim)) and all (mod.isin(mod_dim)) |
py | 7dfe52b2ead306a2fd8580cb3723be62ba974487 | # S3Search Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/tests/unit_tests/modules/s3/s3search.py
import unittest
from gluon import *
from s3.s3search import *
# =============================================================================
class TestS3SearchSimpleWidget(unittest.TestCase):
"""
Test the S3SearchSimpleWidget to make sure it can create queries for
real and virtual fields.
"""
def setUp(self):
# This is where I should create a resource and filters
self.resource = current.s3db.resource("hrm_human_resource")
self.widget = S3SearchSimpleWidget(field="person_id$first_name", _size=20)
self.virtual_field_widget = S3SearchSimpleWidget(field="course")
def test_query(self):
# Test the query method.
# Pass no value
self.assertEqual(self.widget.query(self.resource, ""), None)
# Pass a single value
self.assertNotEqual(self.widget.query(self.resource, "1"), None)
# Pass space-separated values
self.assertNotEqual(self.widget.query(self.resource, "two values"), None)
# Test against virtual field
self.assertNotEqual(self.virtual_field_widget.query(self.resource, "value"), None)
def test_widget(self):
# Test the default HTML widget
self.assertEqual(str(self.widget.widget(self.resource)),
str(INPUT(_id="human_resource_search_simple",
_name="human_resource_search_simple",
_size="20",
_type="text")))
# Test the widget with extra values
self.assertEqual(str(self.widget.widget(self.resource,
value="search string")),
str(INPUT(_id="human_resource_search_simple",
_name="human_resource_search_simple",
_size="20",
_type="text",
_value="search string")))
# =============================================================================
class TestS3SearchOptionsWidget(unittest.TestCase):
"""
Test S3SearchOptionsWidget
"""
def setUp(self):
self.resource = current.s3db.resource("hrm_human_resource")
def testQuery(self):
# Test the query method
pass
def testWidget(self):
T = current.T
# Test the widget method
# Test the widget method with a virtual field and no options
widget = S3SearchOptionsWidget("virtual_field",
options={})
output = widget.widget(self.resource, {})
self.assertEqual(str(output),
str(SPAN(T("No options available"),
_class="no-options-available")))
# Test widget with virtual field and one option.
# Should return no-options message.
# - no longer!
#widget = S3SearchOptionsWidget("virtual_field",
# options={1:"One"})
#output = widget.widget(self.resource, {})
#self.assertEqual(str(output),
# str(SPAN(T("No options available"),
# _class="no-options-available")))
# Test widget with virtual field and multiple options.
widget = S3SearchOptionsWidget("virtual_field",
options={1:"One", 2:"Two"})
output = widget.widget(self.resource, {})
self.assertEqual(str(output),
str(TABLE(TR(TD(INPUT(_name="human_resource_search_select_virtual_field",
_id="id-human_resource_search_select_virtual_field-0",
_type="checkbox",
_value="1"),
LABEL("One",
_for="id-human_resource_search_select_virtual_field-0"),
),
),
TR(TD(INPUT(_name="human_resource_search_select_virtual_field",
_id="id-human_resource_search_select_virtual_field-1",
_type="checkbox",
_value="2"),
LABEL("Two",
_for="id-human_resource_search_select_virtual_field-1")
)
),
_class="s3-checkboxes-widget",
_id="human_resource_search_select_virtual_field",
_name="human_resource_search_select_virtual_field_widget")))
# =============================================================================
class TestS3SearchMinMaxWidget(unittest.TestCase):
"""
Test S3SearchOptionsWidget
"""
def setUp(self):
#self.resource = current.s3db.resource("inv_track_item")
pass
def testQuery(self):
# Test the query method
pass
def testWidgetLabel(self):
# Test the widget label method
output = S3SearchMinMaxWidget.widget_label(dict(name="wname",
label="wlabel"))
self.assertEqual(str(output),
str(LABEL("wlabel", _for="id-wname")))
def testWidgetInput(self):
# Test the widget label method
output = S3SearchMinMaxWidget.widget_input(dict(name="wname",
label="wlabel",
requires="",
attributes=dict(_class="wclass")))
self.assertEqual(str(output),
str(INPUT(_name="wname", _id="id-wname", _class="wclass")))
# =============================================================================
class S3FilterWidgetTests(unittest.TestCase):
""" Tests for S3FilterWidget base class helper methods """
def testInit(self):
""" Test filter widget constructor """
widget = S3FilterWidget(["name", "organisation_id$name"],
option="test option",
_class="test-class")
self.assertTrue("option" in widget.opts)
self.assertTrue(len(widget.opts), 1)
self.assertTrue(widget.opts["option"] == "test option")
self.assertTrue("_class" in widget.attr)
self.assertTrue(len(widget.attr), 1)
self.assertTrue(widget.attr["_class"] == "test-class")
def testRender(self):
""" Test rendering of the hidden data element """
widget = S3FilterWidget(["name", "organisation_id$name"])
# Override widget renderer
widget.widget = lambda resource, values: ""
resource = current.s3db.resource("org_office")
output = widget(resource, get_vars={})
self.assertTrue(isinstance(output[0], INPUT))
# Check attributes of the hidden data element
attr = output[0].attributes
t = attr["_type"]
self.assertEqual(t, "hidden")
c = attr["_class"]
# Generic class
self.assertTrue("filter-widget-data" in c)
# Widget-type-specific class
self.assertTrue("%s-data" % widget._class in c)
i = attr["_id"]
self.assertEqual(i, "%s-org_office_name-org_organisation_name-%s-data" %
(resource.alias, widget._class))
v = attr["_value"]
self.assertEqual(v, "~.name|~.organisation_id$name")
def testSelector(self):
""" Test construction of the URL query selector for a filter widget """
fields = "name"
s3db = current.s3db
resource = s3db.resource("org_organisation")
label, selector = S3FilterWidget._selector(resource, fields)
self.assertEqual(selector, "~.name")
fields = "nonexistent_component.name"
resource = s3db.resource("org_organisation")
label, selector = S3FilterWidget._selector(resource, fields)
self.assertEqual(selector, None)
fields = ["name", "organisation_id$name"]
resource = s3db.resource("org_office")
label, selector = S3FilterWidget._selector(resource, fields)
self.assertEqual(selector, "~.name|~.organisation_id$name")
fields = []
resource = s3db.resource("org_organisation")
label, selector = S3FilterWidget._selector(resource, fields)
self.assertEqual(selector, None)
def testVariable(self):
""" Test construction of the URL variable for filter widgets """
variable = S3FilterWidget._variable("organisation.name", "like")
self.assertEqual(variable, "organisation.name__like")
variable = S3FilterWidget._variable("organisation.name", None)
self.assertEqual(variable, "organisation.name")
variable = S3FilterWidget._variable("organisation.name", "")
self.assertEqual(variable, "organisation.name")
variable = S3FilterWidget._variable("organisation.name", ("ge", "le"))
self.assertEqual(variable, ["organisation.name__ge",
"organisation.name__le"])
def testValues(self):
""" Test extraction of filter widget values from GET vars """
get_vars = {"test_1": "1",
"test_2": ["1,2", "3"]}
values = S3FilterWidget._values(get_vars, "test_1")
self.assertEqual(len(values), 1)
self.assertTrue("1" in values)
values = S3FilterWidget._values(get_vars, "test_2")
self.assertEqual(len(values), 3)
self.assertTrue("1" in values)
self.assertTrue("2" in values)
self.assertTrue("3" in values)
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
TestS3SearchSimpleWidget,
TestS3SearchOptionsWidget,
TestS3SearchMinMaxWidget,
S3FilterWidgetTests,
)
# END ========================================================================
|
py | 7dfe53502e2ba0b18971138dc05cf5b7f7650c89 | def strip_empty_symbols(symbols):
assert isinstance(symbols, list)
new_symbols = []
for symbol in symbols:
if len(symbol) != 0:
new_symbols.append(symbol)
return new_symbols
def strip_updir(file_name):
"""Strips all '../' from start of file_name"""
fn = file_name
    while fn.startswith('../'):
        fn = fn[3:]
return fn
|
py | 7dfe54c48cbd6feada6f68b5b85c4279ba632938 | #!/usr/bin/python
# Handle unicode encoding
import csv
import errno
import getpass
import itertools
import locale
import os
import platform
import threading
import time
import shlex
import socket
import sys
import tempfile
import urllib2
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
from re import compile, escape, sub
from subprocess import Popen, call, PIPE, STDOUT
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
try:
import json
HAS_JSON = True
except Exception:
HAS_JSON = False
fsli_C_FAILED = 1
fsli_C_OK = 2
fsli_C_SKIP = 4
fsli_C_WARN = 3
CURRENT = 0
UPDATE = 1
UPGRADE = 2
class Version(object):
def __init__(self, version_string):
v_vals = version_string.split('.')
for v in v_vals:
if not v.isdigit():
raise ValueError('Bad version string')
self.major = int(v_vals[0])
try:
self.minor = int(v_vals[1])
except IndexError:
self.minor = 0
try:
self.patch = int(v_vals[2])
except IndexError:
self.patch = 0
try:
self.hotfix = int(v_vals[3])
except IndexError:
self.hotfix = 0
def __repr__(self):
return "Version(%s,%s,%s,%s)" % (
self.major,
self.minor,
self.patch,
self.hotfix)
def __str__(self):
if self.hotfix == 0:
return "%s.%s.%s" % (self.major, self.minor, self.patch)
else:
return "%s.%s.%s.%s" % (
self.major,
self.minor,
self.patch,
self.hotfix)
def __ge__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self > other or self == other:
return True
return False
def __le__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self < other or self == other:
return True
return False
def __cmp__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.__lt__(other):
return -1
if self.__gt__(other):
return 1
return 0
def __lt__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.major < other.major:
return True
if self.major > other.major:
return False
if self.minor < other.minor:
return True
if self.minor > other.minor:
return False
if self.patch < other.patch:
return True
if self.patch > other.patch:
return False
if self.hotfix < other.hotfix:
return True
if self.hotfix > other.hotfix:
return False
# major, minor and patch all match so this is not less than
return False
def __gt__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.major > other.major:
return True
if self.major < other.major:
return False
if self.minor > other.minor:
return True
if self.minor < other.minor:
return False
if self.patch > other.patch:
return True
if self.patch < other.patch:
return False
if self.hotfix > other.hotfix:
return True
if self.hotfix < other.hotfix:
return False
# major, minor and patch all match so this is not less than
return False
def __eq__(self, other):
if not isinstance(other, Version):
return NotImplemented
if (
self.major == other.major and
self.minor == other.minor and
self.patch == other.patch and
self.hotfix == other.hotfix):
return True
return False
def __ne__(self, other):
if not isinstance(other, Version):
return NotImplemented
if self.__eq__(other):
return False
return True
version = Version('3.0.16')
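# Illustrative comparisons using the semantics defined above (comment only):
#   Version('3.0.16') > Version('3.0.9')   # True - numeric, not lexical, comparison
#   Version('6.0') == Version('6.0.0')     # True - missing fields default to 0
#   Version('5.0.2.1') > Version('5.0.2')  # True - the hotfix field breaks the tie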
def memoize(f):
cache = f.cache = {}
def g(*args, **kwargs):
key = (f, tuple(args), frozenset(kwargs.items()))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return g
class InstallError(Exception):
pass
class shell_colours(object):
default = '\033[0m'
rfg_kbg = '\033[91m'
gfg_kbg = '\033[92m'
yfg_kbg = '\033[93m'
mfg_kbg = '\033[95m'
yfg_bbg = '\033[104;93m'
bfg_kbg = '\033[34m'
bold = '\033[1m'
class MsgUser(object):
__debug = False
__quiet = False
@classmethod
def debugOn(cls):
cls.__debug = True
@classmethod
def debugOff(cls):
cls.__debug = False
@classmethod
def quietOn(cls):
cls.__quiet = True
@classmethod
def quietOff(cls):
cls.__quiet = False
@classmethod
def isquiet(cls):
return cls.__quiet
@classmethod
def isdebug(cls):
return cls.__debug
@classmethod
def debug(cls, message, newline=True):
if cls.__debug:
mess = str(message)
if newline:
mess += "\n"
sys.stderr.write(mess)
@classmethod
def message(cls, msg):
if cls.__quiet:
return
print msg
@classmethod
def question(cls, msg):
print msg,
@classmethod
def skipped(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.mfg_kbg, "[Skipped] ", shell_colours.default, msg))
@classmethod
def ok(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.gfg_kbg, "[OK] ", shell_colours.default, msg))
@classmethod
def failed(cls, msg):
print "".join(
(shell_colours.rfg_kbg, "[FAILED] ", shell_colours.default, msg))
@classmethod
def warning(cls, msg):
if cls.__quiet:
return
print "".join(
(shell_colours.bfg_kbg,
shell_colours.bold,
"[Warning]",
shell_colours.default, " ", msg))
class Progress_bar(object):
def __init__(self, x=0, y=0, mx=1, numeric=False, percentage=False):
self.x = x
self.y = y
self.width = 50
self.current = 0
self.max = mx
self.numeric = numeric
self.percentage = percentage
def update(self, reading):
if MsgUser.isquiet():
return
percent = int(round(reading * 100.0 / self.max))
cr = '\r'
if not self.numeric and not self.percentage:
bar = '#' * int(percent)
elif self.numeric:
bar = "/".join(
(str(reading),
str(self.max))) + ' - ' + str(percent) + "%\033[K"
elif self.percentage:
bar = "%s%%" % (percent)
sys.stdout.write(cr)
sys.stdout.write(bar)
sys.stdout.flush()
self.current = percent
if percent == 100:
sys.stdout.write(cr)
if not self.numeric and not self.percentage:
sys.stdout.write(" " * int(percent))
sys.stdout.write(cr)
sys.stdout.flush()
elif self.numeric:
sys.stdout.write(" " * (len(str(self.max))*2 + 8))
sys.stdout.write(cr)
sys.stdout.flush()
elif self.percentage:
sys.stdout.write("100%")
sys.stdout.write(cr)
sys.stdout.flush()
def temp_file_name(mode='r', close=False):
'''Return a name for a temporary file - uses mkstemp to create the file and
returns a tuple (file object, file name).
Opens as read-only unless mode specifies otherwise. If close is set to True
will close the file before returning.
The file object is a fdopen file object so lacks a useable file name.'''
(tmpfile, fname) = tempfile.mkstemp()
file_obj = os.fdopen(tmpfile, mode)
if close:
file_obj.close()
return (file_obj, fname)
class RunCommandError(Exception):
pass
class Spinner(object):
spinner = itertools.cycle(('-', '\\', '|', '/', ))
busy = False
delay = 0.2
def __init__(self, delay=None, quiet=False):
if delay:
try:
self.delay = float(delay)
except ValueError:
pass
self.quiet = quiet
def spin_it(self):
while self.busy:
sys.stdout.write(self.spinner.next())
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
if not self.quiet:
self.busy = True
threading.Thread(target=self.spin_it).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
def run_cmd_dropstdout(command, as_root=False):
'''Run the command and return result.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
else:
sudo_pwd = ''
try:
my_spinner = Spinner(quiet=MsgUser.isquiet())
my_spinner.start()
cmd = Popen(command_line, stdin=PIPE, stdout=None, stderr=PIPE)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
(_, error) = cmd.communicate()
except Exception:
raise
finally:
my_spinner.stop()
if cmd.returncode:
MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
raise RunCommandError(error)
def run_cmd(command, as_root=False):
'''Run the command and return result.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
else:
sudo_pwd = ''
MsgUser.debug("Will call %s" % (command_line))
try:
my_spinner = Spinner(quiet=MsgUser.isquiet())
my_spinner.start()
cmd = Popen(command_line, stdin=PIPE, stdout=PIPE, stderr=PIPE)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
(output, error) = cmd.communicate()
except Exception:
raise
finally:
my_spinner.stop()
if cmd.returncode:
MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
raise RunCommandError(error)
MsgUser.debug("Command completed successfully (%s)" % (output))
return output
def run_cmd_displayoutput(command, as_root=False):
'''Run the command and display output.'''
command_line = shlex.split(command)
if as_root and os.getuid() != 0:
try:
sudo_pwd = get_sudo_pwd()
except SudoPasswordError:
raise RunCommandError(
"Unable to get valid administrator's password")
command_line.insert(0, '-S')
command_line.insert(0, 'sudo')
MsgUser.debug("Will call %s" % (command_line))
cmd = Popen(
command_line,
stdin=PIPE, stdout=sys.stdout, stderr=sys.stderr)
if sudo_pwd:
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
cmd.communicate()
return_code = cmd.returncode
else:
return_code = call(command_line)
if return_code:
MsgUser.debug("An error occured (%s)" % (return_code))
raise RunCommandError(return_code)
MsgUser.debug("Command completed successfully")
def check_sudo(sudo_pwd):
command_line = ['sudo', '-S', 'true']
MsgUser.debug("Checking sudo password")
cmd = Popen(
command_line,
stdin=PIPE,
stdout=DEVNULL,
stderr=DEVNULL
)
cmd.stdin.write(sudo_pwd + '\n')
cmd.stdin.flush()
cmd.communicate()
if cmd.returncode != 0:
return False
else:
return True
class SudoPasswordError(Exception):
pass
@memoize
def get_sudo_pwd():
'''Get the sudo password from the user'''
MsgUser.message("We require your password to continue...")
attempts = 0
valid = False
while attempts < 3 and not valid:
sudo_pwd = getpass.getpass('password: ')
valid = check_sudo(sudo_pwd)
if not valid:
MsgUser.failed("Incorrect password")
attempts += 1
if not valid:
raise SudoPasswordError()
return sudo_pwd
class DeletionRefused(Exception):
pass
class SafeDeleteError(Exception):
pass
def safe_delete(fs_object, as_root=False):
'''Delete file/folder, becoming root if necessary.
Run some sanity checks on object'''
banned_items = ['/', '/usr', '/usr/bin', '/usr/local', '/bin',
'/sbin', '/opt', '/Library', '/System', '/System/Library',
'/var', '/tmp', '/var/tmp', '/lib', '/lib64', '/Users',
'/home', '/Applications', '/private', '/etc', '/dev',
'/Network', '/net', '/proc']
if os.path.isdir(fs_object):
del_opts = "-rf"
else:
del_opts = '-f'
if fs_object in banned_items:
raise DeletionRefused('Will not delete %s!' % (fs_object))
command_line = " ".join(('rm', del_opts, fs_object))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise SafeDeleteError(str(e))
return result
class MoveError(Exception):
pass
def move(source, target, as_root):
try:
run_cmd_dropstdout(" ".join(('mv', source, target)), as_root)
except RunCommandError, e:
raise MoveError(str(e))
class IsDirectoryError(Exception):
pass
class CopyFileError(Exception):
pass
def copy_file(fname, destination, as_root):
'''Copy a file using sudo if necessary'''
MsgUser.debug("Copying %s to %s (as root? %s)" % (
fname, destination, as_root))
if os.path.isdir(fname):
raise IsDirectoryError('Source (%s) is a director!' % (fname))
if os.path.isdir(destination):
# Ensure that copying into a folder we have a terminating slash
destination = destination.rstrip('/') + "/"
copy_opts = '-p'
command_line = " ".join(('cp', copy_opts, fname, destination))
try:
result = run_cmd(command_line, as_root)
except RunCommandError, e:
raise CopyFileError(str(e))
return result
def file_contains(fname, search_for):
'''Equivalent of grep'''
regex = compile(escape(search_for))
found = False
MsgUser.debug("In file_contains.")
MsgUser.debug("Looking for %s in %s." % (search_for, fname))
f = open(fname, 'r')
for l in f:
if regex.search(l):
found = True
break
f.close()
return found
def file_contains_1stline(fname, search_for):
'''Equivalent of grep - returns first occurrence'''
regex = compile(escape(search_for))
found = ''
MsgUser.debug("In file_contains_1stline.")
MsgUser.debug("Looking for %s in %s." % (search_for, fname))
f = open(fname, 'r')
for l in f:
if regex.search(l):
found = l
break
f.close()
return found
def line_string_replace(line, search_for, replace_with):
return sub(escape(search_for), escape(replace_with), line)
def line_starts_replace(line, search_for, replace_with):
if line.startswith(search_for):
return replace_with + '\n'
return line
class MoveFileError(Exception):
pass
def move_file(from_file, to_file, requires_root=False):
'''Move a file, using /bin/cp via sudo if requested.
Will work around known bugs in python.'''
if requires_root:
try:
run_cmd_dropstdout(" ".join(
("/bin/cp", from_file, to_file)), as_root=True)
except RunCommandError, e:
MsgUser.debug(e)
raise MoveFileError("Failed to move %s (%s)" % (from_file, str(e)))
os.remove(from_file)
else:
try:
move(from_file, to_file, requires_root)
except OSError, e:
# Handle bug in some python versions on OS X writing to NFS home
# folders, Python tries to preserve file flags but NFS can't do
# this. It fails to catch this error and ends up leaving the file
# in the original and new locations!
if e.errno == 45:
# Check if new file has been created:
if os.path.isfile(to_file):
# Check if original exists
if os.path.isfile(from_file):
# Destroy original and continue
os.remove(from_file)
else:
try:
run_cmd_dropstdout("/bin/cp %s %s" % (
from_file, to_file), as_root=False)
except RunCommandError, e:
MsgUser.debug(e)
raise MoveFileError("Failed to copy from %s (%s)" % (
from_file, str(e)))
os.remove(from_file)
else:
raise
except Exception:
raise
class EditFileError(Exception):
pass
def edit_file(fname, edit_function, search_for, replace_with, requires_root):
'''Search for a simple string in the file given and replace
it with the new text'''
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
src = open(fname)
for line in src:
line = edit_function(line, search_for, replace_with)
tmpfile.write(line)
src.close()
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except MoveFileError, e:
MsgUser.debug(e)
os.remove(tmpfname)
raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
MsgUser.debug("Modified %s (search %s; replace %s)." % (
fname, search_for, replace_with))
class AddToFileError(Exception):
pass
def add_to_file(fname, add_lines, requires_root):
'''Add lines to end of a file'''
if isinstance(add_lines, basestring):
add_lines = add_lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
src = open(fname)
for line in src:
tmpfile.write(line)
src.close()
tmpfile.write('\n')
for line in add_lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except MoveFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise AddToFileError("Failed to add to file %s (%s)" % (
fname, str(e)))
except IOError, e:
MsgUser.debug("%s %s %s" % (e.strerror, tmpfname, fname))
raise AddToFileError("Failed to add to file %s" % (fname))
MsgUser.debug("Modified %s (added %s)" % (fname, '\n'.join(add_lines)))
class CreateFileError(Exception):
pass
def create_file(fname, lines, requires_root):
'''Create a new file containing lines given'''
if isinstance(lines, basestring):
lines = lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
for line in lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except CreateFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise CreateFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise CreateFileError("Failed to create %s" % (fname))
MsgUser.debug("Created %s (added %s)" % (fname, '\n'.join(lines)))
class UnsupportedOs(Exception):
pass
class Host(object):
'''Work out which platform we are running on'''
o_s = platform.system().lower()
arch = platform.machine()
applever = ''
os_type = os.name
supported = True
if o_s == 'darwin':
vendor = 'apple'
version = Version(platform.release())
(applever, _, _) = platform.mac_ver()
glibc = ''
elif o_s == 'linux':
if hasattr(platform, 'linux_distribution'):
# We have a modern python (>2.4)
(vendor, version, _) = platform.linux_distribution(
full_distribution_name=0)
else:
(vendor, version, _) = platform.dist()
vendor = vendor.lower()
version = Version(version)
glibc = platform.libc_ver()[1]
else:
supported = False
if arch == 'x86_64':
bits = '64'
elif arch == 'i686':
bits = '32'
elif arch == 'Power Macintosh':
bits = ''
def is_writeable(location):
'''Check if we can write to the location given'''
writeable = True
try:
tfile = tempfile.NamedTemporaryFile(mode='w+b', dir=location)
tfile.close()
except OSError, e:
if e.errno == errno.EACCES or e.errno == errno.EPERM:
writeable = False
else:
raise
return writeable
def is_writeable_as_root(location):
'''Check if sudo can write to a given location'''
# This requires us to use sudo
(f, fname) = temp_file_name(mode='w')
f.write("FSL")
f.close()
result = False
tmptarget = '/'.join((location, os.path.basename(fname)))
MsgUser.debug(" ".join(('/bin/cp', fname, tmptarget)))
try:
run_cmd_dropstdout(" ".join(('/bin/cp',
fname, tmptarget)), as_root=True)
result = True
os.remove(fname)
run_cmd_dropstdout(" ".join(('/bin/rm',
'-f', tmptarget)), as_root=True)
except RunCommandError, e:
MsgUser.debug(e)
os.remove(fname)
result = False
MsgUser.debug("Writeable as root? %s" % (result))
return result
class ChecksumCalcError(Exception):
pass
def sha256File(filename, bs=1048576):
'''Returns the sha256 sum of the given file.'''
MsgUser.message("Checking FSL package")
try:
import hashlib
f = open(filename, 'rb')
pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
pb.position = 0
fhash = hashlib.sha256()
data = f.read(bs)
while len(data) == bs:
fhash.update(data)
data = f.read(bs)
pb.position += len(data)
pb.update(pb.position)
fhash.update(data)
f.close()
return fhash.hexdigest()
except ImportError:
# No SHA256 support on python pre-2.5 so call the OS to do it.
try:
result = run_cmd(" ".join(('sha256sum', '-b', filename)))
return parsesha256sumfile(result)
except RunCommandError, e:
MsgUser.debug("SHA256 calculation error %s" % (str(e)))
raise ChecksumCalcError
def parsesha256sumfile(sha256string):
'''Returns sha256 sum extracted from the output of sha256sum or shasum -a
256 from OS X/Linux platforms'''
(sha256, _) = sha256string.split("*")
return sha256.strip()
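# Illustrative sketch (hypothetical values, not from the installer): sha256sum
# prints "<hash> *<filename>", so parsesha256sumfile splits on the '*' and
# returns the stripped hash, e.g.
#   parsesha256sumfile("abf645662bcf4453235 *fsl-5.0.9-centos6_64.tar.gz")
#   -> "abf645662bcf4453235"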
def md5File(filename, bs=1048576):
'''Returns the MD5 sum of the given file.'''
MsgUser.message("Checking FSL package")
try:
import hashlib
fhash = hashlib.md5()
except ImportError:
import md5
fhash = md5.new()
f = open(filename, 'rb')
pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
pb.position = 0
data = f.read(bs)
while len(data) == bs:
fhash.update(data)
data = f.read(bs)
pb.position += len(data)
pb.update(pb.position)
fhash.update(data)
f.close()
return fhash.hexdigest()
def file_checksum(filename, chktype='sha256'):
if chktype == 'sha256':
return sha256File(filename)
if chktype == 'md5':
return md5File(filename)
else:
raise ChecksumCalcError('Unrecognised checksum type')
class OpenUrlError(Exception):
pass
def open_url(url, start=0, timeout=20):
socket.setdefaulttimeout(timeout)
MsgUser.debug("Attempting to download %s." % (url))
try:
req = urllib2.Request(url)
if start != 0:
req.headers['Range'] = 'bytes=%s-' % (start)
rf = urllib2.urlopen(req)
except urllib2.HTTPError, e:
MsgUser.debug("%s %s" % (url, e.msg))
raise OpenUrlError("Cannot find file %s on server (%s). "
"Try again later." % (url, e.msg))
except urllib2.URLError, e:
if type(e.reason) != str:
errno = e.reason.args[0]
message = e.reason.args[1]
if errno == 8:
# Bad host name
MsgUser.debug("%s %s" % (url,
"Unable to find FSL download "
"server in the DNS"))
else:
# Other error
MsgUser.debug("%s %s" % (url, message))
else:
message = e.reason
raise OpenUrlError(
"Cannot find %s (%s). Try again later." % (url, message))
except socket.timeout, e:
MsgUser.debug(e)
raise OpenUrlError("Failed to contact FSL web site. Try again later.")
return rf
class DownloadFileError(Exception):
pass
def download_file(url, localf, timeout=20):
'''Get a file from the url given storing it in the local file specified'''
try:
rf = open_url(url, 0, timeout)
except OpenUrlError, e:
raise DownloadFileError(str(e))
metadata = rf.info()
rf_size = int(metadata.getheaders("Content-Length")[0])
dl_size = 0
block = 16384
x = 0
y = 0
pb = Progress_bar(x, y, rf_size, numeric=True)
for attempt in range(1, 6):
# Attempt download 5 times before giving up
pause = timeout
try:
try:
lf = open(localf, 'ab')
except Exception:
raise DownloadFileError("Failed to create temporary file.")
while True:
buf = rf.read(block)
if not buf:
break
dl_size += len(buf)
lf.write(buf)
pb.update(dl_size)
lf.close()
except (IOError, socket.timeout), e:
MsgUser.debug(e.strerror)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
pause = 0
if dl_size != rf_size:
time.sleep(pause)
MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
try:
rf = open_url(url, dl_size, timeout)
except OpenUrlError, e:
MsgUser.debug(e)
else:
break
if dl_size != rf_size:
raise DownloadFileError("Failed to download file.")
def build_url_with_protocol(protocol, base, parts):
part_l = [protocol + '://' + base.strip('/')]
part_l.extend([x.strip('/') for x in parts])
return '/'.join(part_l)
def build_url(parts):
part_l = [parts[0].strip('/')]
part_l.extend([x.strip('/') for x in parts[1:]])
return '/'.join(part_l)
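# Illustrative sketch (hypothetical host) of how these helpers compose URLs:
#   build_url_with_protocol('https', 'example.host/', ('fsldownloads', ''))
#   -> 'https://example.host/fsldownloads/'
#   build_url(('https://example.host/fsldownloads/', 'manifest.json'))
#   -> 'https://example.host/fsldownloads/manifest.json'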
class SiteNotResponding(Exception):
pass
def fastest_mirror(main_mirrors, mirrors_file, timeout=20):
'''Find the fastest mirror for FSL downloads.'''
MsgUser.debug("Calculating fastest mirror")
socket.setdefaulttimeout(timeout)
# Get the mirror list from the url
fastestmirrors = {}
mirrorlist = []
for m in main_mirrors:
MsgUser.debug("Trying %s" % (m))
m_url = '/'.join((m.strip('/'), mirrors_file))
MsgUser.debug("Attempting to open %s" % (m_url))
try:
response = urllib2.urlopen(url=m_url)
except urllib2.HTTPError, e:
MsgUser.debug("%s %s" % (m_url, e.msg))
raise SiteNotResponding(e.msg)
except urllib2.URLError, e:
if isinstance(e.reason, socket.timeout):
MsgUser.debug("Time out trying %s" % (m_url))
raise SiteNotResponding(m)
else:
MsgUser.debug(str(e.reason))
raise SiteNotResponding(str(e.reason))
except socket.timeout, e:
MsgUser.debug(e)
raise SiteNotResponding(str(e))
except Exception, e:
MsgUser.debug("Unhandled exception %s" % (str(e)))
raise
else:
mirrorlist = response.read().strip().split('\n')
MsgUser.debug("Received the following "
"mirror list %s" % (mirrorlist))
continue
if len(mirrorlist) == 0:
raise ServerFailure("Cannot find FSL download servers")
# Check timings from the urls specified
if len(mirrorlist) > 1:
for mirror in mirrorlist:
MsgUser.debug("Trying %s" % (mirror))
then = time.time()
if mirror.startswith('http:'):
serverport = 80
elif mirror.startswith('https:'):
serverport = 443
else:
raise ServerFailure("Unrecognised protocol")
try:
mysock = socket.create_connection((mirror, serverport),
timeout)
pingtime = time.time() - then
mysock.close()
fastestmirrors[pingtime] = mirror
MsgUser.debug("Mirror responded in %s seconds" % (pingtime))
except socket.gaierror, e:
MsgUser.debug("%s can't be resolved" % (e))
except socket.timeout, e:
MsgUser.debug(e)
if len(fastestmirrors) == 0:
raise ServerFailure('Failed to contact any FSL download sites.')
download_url = fastestmirrors[min(fastestmirrors.keys())]
else:
download_url = mirrorlist[0]
return download_url
# Concept:
# Web app creates the following files:
# fslmirrorlist.txt - contains a list of mirror urls
# fslreleases.json - contains the available maps for oses
# mapping to a download url
# {'installer' {
# 'filename': 'fslinstaller.py',
# 'version': '3.0.0',
# 'date': '02/03/2017',
# 'checksum_type', 'sha256',
# 'checksum'},
# 'linux' : {
# 'centos' : {
# 'x86_64': {
# '6': {
# '5.0.9': {
# 'filename': 'fsl-5.0.9-centos6_64.tar.gz',
# 'version': '5.0.9',
# 'date': '01/02/2017',
# 'checksum_type', 'sha256',
# 'checksum': 'abf645662bcf4453235',
# },
# },
# },
# },
# 'rhel' : {'alias': 'centos'}},
# 'apple' : {
# 'darwin' : {
# 'x86_64': {
# '11': {
# ....
# },
# }
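# A minimal usage sketch of the structure above (values are hypothetical and
# mirror the layout described in this comment, not real release data):
#   manifest = get_web_manifest('https://example.host/fsldownloads/')
#   release = manifest['linux']['centos']['x86_64']['6']['5.0.9']
#   release['filename'], release['checksum_type'], release['checksum']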
@memoize
def get_web_manifest(download_url, timeout=20):
'''Download the FSL manifest from download_url'''
socket.setdefaulttimeout(timeout)
MsgUser.debug("Looking for manifest at %s." % (download_url))
if HAS_JSON:
MsgUser.debug("Downloading JSON file")
return get_json(download_url + Settings.manifest_json)
else:
MsgUser.debug("Downloading CSV file")
return get_csv_dict(download_url + Settings.manifest_csv)
class GetFslDirError(Exception):
pass
@memoize
def get_fsldir(specified_dir=None, install=False):
'''Find the installed version of FSL using FSLDIR
or location of this script'''
def validate_fsldir(directory):
parent = os.path.dirname(directory)
if parent == directory:
raise GetFslDirError(
"%s appears to be the root folder" %
parent)
if not os.path.exists(parent):
raise GetFslDirError(
"%s doesn't exist" %
parent)
if not os.path.isdir(parent):
raise GetFslDirError(
"%s isn't a directory" %
parent)
if (os.path.exists(directory) and not
os.path.exists(os.path.join(
directory, 'etc', 'fslversion'
))):
raise GetFslDirError(
"%s exists and doesn't appear to be an installed FSL folder" %
directory)
if specified_dir:
if install is False:
if not check_fsl_install(specified_dir):
raise GetFslDirError(
"%s isn't an 'fsl' folder" %
specified_dir)
else:
validate_fsldir(specified_dir)
return specified_dir
try:
fsldir = os.environ['FSLDIR']
try:
validate_fsldir(fsldir)
except GetFslDirError:
# FSLDIR environment variable is incorrect!
MsgUser.warning('FSLDIR environment variable '
'does not point at FSL install, ignoring...')
MsgUser.debug('FSLDIR is set to %s - '
'this folder does not appear to exist' % (fsldir))
fsldir = None
else:
fsldir = fsldir.rstrip('/')
if MsgUser.isquiet():
return fsldir
except KeyError:
# Look to see if I'm in an FSL install
try:
my_parent = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
except NameError:
# Running in debugger - __file__ not set, assume it's cwd
my_parent = os.path.dirname(
os.path.dirname(os.getcwd()))
try:
validate_fsldir(my_parent)
fsldir = my_parent
except GetFslDirError:
fsldir = None
if not install:
MsgUser.debug("asking about %s" % (fsldir))
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'inst_loc', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.failed(str(e))
return fsldir
else:
if not MsgUser.isquiet():
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'location', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.failed(str(e))
MsgUser.message(
'''Hint - press Enter to select the default value '''
'''given in the square brackets.
If you are specifying a destination folder this needs to either be an existing
FSL install folder or a folder that doesn't already exist.''')
fsldir = None
else:
raise GetFslDirError(
"I can't locate FSL, try again using '-d <FSLDIR>' "
"to specify where to find the FSL install")
return fsldir
def archive_version(archive):
'''Takes the path to a FSL install file
and works out what version it is.'''
if not os.path.isfile(archive):
raise NotAFslVersion("%s is not a file" % (archive))
else:
# file is of form: fsl-V.V.V-platform.extensions
(_, vstring, _) = archive.strip().split('-', 2)
try:
return Version(vstring)
except ValueError:
raise NotAFslVersion(
"%s doesn't look like "
"a version number" % (vstring))
class NotAFslVersion(Exception):
pass
class GetInstalledVersionError(Exception):
pass
def get_installed_version(fsldir):
'''Takes path to FSLDIR and finds installed version details'''
MsgUser.debug("Looking for fsl in %s" % fsldir)
v_file = os.path.join(fsldir, 'etc', 'fslversion')
if os.path.exists(v_file):
f = open(v_file)
v_string = f.readline()
f.close()
try:
version = Version(v_string.strip())
except ValueError:
raise NotAFslVersion(
"%s not a valid "
"version string" % (v_string.strip()))
else:
MsgUser.debug(
"No version information found - "
"is this actually an FSL dir?")
raise GetInstalledVersionError(
"Cannot find the version information - "
"is this actually an FSL dir?")
MsgUser.debug("Found version %s" % (version))
return version
def which_shell():
return os.path.basename(os.getenv("SHELL"))
class SelfUpdateError(Exception):
pass
def self_update(server_url):
'''Check for and apply an update to myself'''
# See if there is a newer version available
if 'fslinstaller' in sys.argv[0]:
try:
installer = get_installer(server_url)
except GetInstallerError, e:
MsgUser.debug("Failed to get installer version %s." % (str(e)))
raise SelfUpdateError('Failed to get installer version. '
'Please try again later.')
MsgUser.debug("Server has version " + installer['version'])
if Version(installer['version']) <= version:
MsgUser.debug("Installer is up-to-date.")
return
# There is a new version available - download it
MsgUser.message("There is a newer version (%s) of the installer "
"(you have %s) updating..." % (
installer['version'], version))
(_, tmpfname) = temp_file_name(mode='w', close=True)
downloaded = False
while downloaded is False:
try:
file_url = '/'.join(
(Settings.mirror.rstrip('/'), installer['filename']))
download_file(
url=file_url,
localf=tmpfname)
if (
file_checksum(tmpfname, installer['checksum_type']) !=
installer['checksum']):
raise SelfUpdateError(
"Found update to installer but download "
"was corrupt. Please try again later.")
except DownloadFileError, e:
if Settings.mirror != Settings.main_mirror:
MsgUser.warning(
"Download from mirror failed, re-trying from "
"main FSL download site")
Settings.mirror = Settings.main_mirror
else:
MsgUser.debug("Failed to update installer %s." % (str(e)))
raise SelfUpdateError(
'Found update to installer but unable to '
'download the new version. Please try again.')
else:
downloaded = True
# Now run the new installer
# EXEC new script with the options we were given
os.chmod(tmpfname, 0755)
c_args = [sys.executable, tmpfname, ]
c_args.extend(sys.argv[1:])
MsgUser.debug(
"Calling %s %s" % (sys.executable, c_args))
os.execv(sys.executable, c_args)
else:
# We are now running the newly downloaded installer
MsgUser.ok('Installer updated to latest version %s' % (str(version)))
MsgUser.ok("Installer self update successful.")
class ServerFailure(Exception):
pass
class BadVersion(Exception):
pass
class GetInstallerError(Exception):
pass
def get_installer(server_url):
MsgUser.debug("Checking %s for "
"installer information" % (server_url))
manifest = get_web_manifest(server_url)
return manifest['installer']
@memoize
def get_releases(server_url):
'''Return a hash with all information about available
versions for this OS'''
computer = Host
MsgUser.debug("Getting web manifest")
manifest = get_web_manifest(server_url)
try:
os_definition = manifest[computer.o_s][computer.vendor]
except KeyError:
raise UnsupportedOs("%s %s not supported by this installer" % (
computer.o_s, computer.vendor
))
t_version = computer.version.major
alias_t = 'alias'
if alias_t in os_definition.keys():
if str(t_version) in os_definition[alias_t]:
os_parent = os_definition[alias_t][
str(t_version)]['parent']
os_definition = manifest[computer.o_s][os_parent]
if computer.arch not in os_definition.keys():
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
computer.arch
))
os_def = os_definition[computer.arch]
while t_version > 0:
MsgUser.debug("Trying version %s" % (t_version))
if str(t_version) not in os_def.keys():
MsgUser.debug("...not found")
t_version -= 1
else:
break
if t_version == 0:
raise UnsupportedOs("%s %s not supported" % (
computer.vendor,
computer.version.major
))
elif t_version != computer.version.major:
MsgUser.warning(
"%s %s not officially supported "
"- trying to locate support for an earlier "
"version - this may not work" % (
computer.vendor, computer.version.major))
return os_definition[computer.arch][str(t_version)]
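# Sketch of the fallback loop above (hypothetical data): on a CentOS 8 host
# whose manifest only lists '7', t_version counts down 8 -> 7, a warning about
# unofficial support is issued, and the '7' release definitions are returned.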
class ExtraDownloadError(Exception):
pass
@memoize
def get_extra(server_url, extra_type):
'''Return a hash with all information about available
versions of source code'''
MsgUser.debug("Getting web manifest")
manifest = get_web_manifest(server_url)
try:
extra = manifest[extra_type]
except KeyError:
raise ExtraDownloadError("Unrecognised extra %s" % (extra_type))
return extra
class ImproperlyConfigured(Exception):
pass
def list_releases(url):
releases = get_releases(url)
MsgUser.message("Available FSL versions for this OS:")
MsgUser.debug(releases)
for v, release in releases.items():
if 'date' in release:
rdate = release['date']
else:
rdate = "Third-party package"
MsgUser.message("%s\t(%s)" % (v, rdate))
def latest_release(url):
releases = get_releases(url)
MsgUser.debug("Got version information: %s" % (releases))
versions = [Version(x) for x in releases.keys()]
MsgUser.debug("Versions: %s" % (versions))
return releases[str(sorted(versions)[-1])]
class InstallInstallerError(Exception):
pass
def install_installer(fsldir):
'''Install this script into $FSLDIR/etc'''
targetfolder = os.path.join(fsldir, 'etc')
as_root = False
installer = os.path.abspath(__file__)
MsgUser.debug(
"Copying fslinstaller (%s) to %s" % (
installer,
targetfolder))
if not is_writeable(targetfolder):
if not is_writeable_as_root(targetfolder):
raise InstallInstallerError("Cannot write to folder as root user.")
else:
as_root = True
copy_file(
installer, os.path.join(targetfolder, "fslinstaller.py"),
as_root)
class InstallQuestions(object):
def __init__(self):
self.questions = {}
self.validators = {}
self.type = {}
self.default = {}
self.defaults = False
self.answers = {}
def add_question(self, key, question, default, qtype, validation_f):
self.questions[key] = question
self.default[key] = default
self.type[key] = qtype
self.validators[key] = validation_f
def ask_question(self, key, default=None):
# Ask a question
no_answer = True
validator = self.validators[key]
def parse_answer(q_type, answer):
if q_type == 'bool':
if answer.lower() == 'yes':
return True
else:
return False
else:
return answer
if not default:
default = self.default[key]
if self.defaults:
MsgUser.debug(self.questions[key])
MsgUser.debug("Automatically using the default %s" % (default))
answer = self.answers[key] = parse_answer(self.type[key], default)
no_answer = False
while no_answer:
MsgUser.question(
"%s? %s:" % (
self.questions[key],
'[%s]' % (default)))
your_answer = raw_input()
MsgUser.debug("Your answer was %s" % (your_answer))
if your_answer == '':
MsgUser.debug("You want the default")
your_answer = default
if validator(your_answer):
answer = parse_answer(self.type[key], your_answer)
no_answer = False
MsgUser.debug("Returning the answer %s" % (answer))
return answer
def yes_no(answer):
if answer.lower() == 'yes' or answer.lower() == 'no':
return True
else:
MsgUser.message("Please enter yes or no.")
return False
def check_install_location(folder):
'''Don't allow relative paths'''
MsgUser.debug("Checking %s is an absolute path" % (folder))
if (folder == '.' or
folder == '..' or
folder.startswith('./') or
folder.startswith('../') or
folder.startswith('~')):
MsgUser.message("Please enter an absolute path.")
return False
return True
def external_validate(what_to_check):
'''We will validate elsewhere'''
return True
def check_fsl_install(fsldir):
'''Check if this folder contains FSL install'''
MsgUser.debug("Checking %s is an FSL install" % (fsldir))
if os.path.isdir(fsldir):
if os.path.exists(
os.path.join(fsldir, 'etc', 'fslversion')
):
return True
return False
def fsl_downloadname(suffix, version):
return 'fsl-%s-%s' % (
version, suffix)
class Settings(object):
version = version
title = "--- FSL Installer - Version %s ---" % (version)
main_server = 'fsl.fmrib.ox.ac.uk'
mirrors = [build_url_with_protocol('https',
main_server, ('fsldownloads',
'')), ]
mirrors_file = 'fslmirrorlist.txt'
manifest_json = 'manifest.json'
manifest_csv = 'manifest.csv'
main_mirror = mirrors[0]
mirror = main_mirror
applications = ['bin/fslview.app', 'bin/assistant.app']
x11 = {'bad_versions': [],
'download_url': "http://xquartz.macosforge.org/landing/",
'apps': ['XQuartz.app', 'X11.app', ],
'location': "/Applications/Utilities"}
default_location = '/usr/local/fsl'
post_inst_dir = "etc/fslconf"
inst_qus = InstallQuestions()
inst_qus.add_question('version_match',
"The requested version matches the installed "
"version - do you wish to re-install FSL",
'no', 'bool', yes_no)
inst_qus.add_question('location',
"Where would you like the FSL install to be "
"(including the FSL folder name)",
default_location, 'path', check_install_location)
inst_qus.add_question('del_old',
"FSL exists in the current location, "
"would you like to keep a backup of the old "
"version (N.B. You will not be able to use the old "
"version)",
'no', 'bool', yes_no)
inst_qus.add_question('create',
"Install location doesn't exist, should I create it",
'yes', 'bool', yes_no)
inst_qus.add_question('inst_loc',
"Where is the FSL folder (e.g. /usr/local/fsl)",
default_location, 'path', check_fsl_install)
inst_qus.add_question('skipmd5',
"I was unable to download the checksum of "
"the install file so cannot confirm it is correct. "
"Would you like to install anyway",
'no', 'bool', yes_no)
inst_qus.add_question('overwrite',
"There is already a local copy of the file, would "
"you like to overwrite it",
"yes", 'bool', yes_no)
inst_qus.add_question('upgrade',
"Would you like to install upgrade",
"yes", 'bool', yes_no)
inst_qus.add_question('update',
"Would you like to install update",
"yes", 'bool', yes_no)
def get_json(web_url):
MsgUser.debug("Opening "+web_url)
try:
url = open_url(web_url)
return json.load(url)
except OpenUrlError, e:
raise ServerFailure(str(e))
# [ linux, centos, x86_64, 6, filename, 'fname',
# version, 'version', date, 'date', checksum_type, 'checksum_type',
# checksum, 'checksum', supported, 'true/false', notes, 'notes',
# instructions, 'instructions']
# [ linux, redhat, alias, centos, supported, True/false, version, 'version' ]
# [ 'installer', filename, 'fname', version, 'version', date, 'date',
# checksum_type, 'checksum_type', checksum, 'checksum', supported,
# 'true/false', notes, 'notes', instructions, 'instructions']
# [ feeds, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
# [ sources, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
class AutoDict(dict):
'''Automatically create a nested dict'''
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
def freeze(self):
'''Returns a dict representation of an AutoDict'''
frozen = {}
for k, v in self.items():
if type(v) == type(self):
frozen[k] = v.freeze()
else:
frozen[k] = v
return frozen
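# Illustrative sketch of AutoDict's auto-vivification as used by get_csv_dict
# below (hypothetical values): intermediate levels are created on first access,
# and freeze() converts the result back to plain nested dicts.
#   d = AutoDict()
#   d['linux']['centos']['x86_64']['7']['6.0.0'] = {'filename': 'fsl-6.0.0-centos7_64.tar.gz'}
#   plain = d.freeze()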
def get_csv_dict(web_url):
MsgUser.debug("Opening "+web_url)
try:
url = open_url(web_url)
manifest_reader = csv.reader(
url, delimiter=',', quoting=csv.QUOTE_MINIMAL)
a_dict = AutoDict()
for line in manifest_reader:
MsgUser.debug(line)
if line[0] == 'feeds':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
elif line[0] == 'sources':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
elif line[0] == 'installer':
items = iter(line[1:])
base_dict = dict(zip(items, items))
a_dict[line[0]] = base_dict
else:
# Install package or alias
if line[2] == 'alias':
items = iter(line[4:])
base_dict = dict(zip(items, items))
a_dict[
str(line[0])][
str(line[1])][
str(line[2])][
str(line[3])] = base_dict
else:
items = iter(line[5:])
base_dict = dict(zip(items, items))
MsgUser.debug(
",".join(
(line[0], line[1], line[2], line[3], line[4])))
a_dict[
str(line[0])][
str(line[1])][
str(line[2])][
str(line[3])][
str(line[4])] = base_dict
except OpenUrlError, e:
raise ServerFailure(str(e))
MsgUser.debug(a_dict)
return a_dict.freeze()
class InvalidVersion(Exception):
pass
def get_web_version_and_details(
server_url=Settings.mirror,
request_version=None):
if request_version is None:
details = latest_release(server_url)
try:
version = Version(details['version'])
except KeyError:
try:
redirect = details['redirect']
raise DownloadError(
"Installer not supported on this platform."
"Please visit %s for download instructions" % redirect)
except KeyError:
MsgUser.debug(
"Can't find version or redirect - %s" % details)
raise DownloadError(
"Unsupported OS"
)
else:
MsgUser.debug("Requested version %s" % request_version)
releases = get_releases(server_url)
try:
version = Version(request_version)
except ValueError:
raise DownloadError(
"%s doesn't look like a version" % request_version)
if request_version not in releases.keys():
raise DownloadError(
"%s isn't an available version" % request_version)
details = releases[request_version]
return (version, details)
def download_release(
server_url=Settings.mirror, to_temp=False,
request_version=None, skip_verify=False,
keep=False, source_code=False, feeds=False):
(version, details) = get_web_version_and_details(
server_url, request_version)
if request_version is None:
request_version = str(version)
if source_code or feeds:
if source_code:
extra_type = 'sources'
MsgUser.message("Downloading source code")
else:
extra_type = 'feeds'
MsgUser.message("Downloading FEEDS")
try:
releases = get_extra(server_url, extra_type)
except ExtraDownloadError, e:
raise DownloadError(
"Unable to find details for %s" % (extra_type)
)
to_temp = False
try:
details = releases[request_version]
except KeyError:
raise DownloadError(
"%s %s isn't available" % (request_version, extra_type)
)
MsgUser.debug(details)
if to_temp:
try:
(_, local_filename) = temp_file_name(close=True)
except Exception, e:
MsgUser.debug("Error getting temporary file name %s" % (str(e)))
raise DownloadError("Unable to begin download")
else:
local_filename = details['filename']
if os.path.exists(local_filename):
if os.path.isfile(local_filename):
MsgUser.message("%s exists" % (local_filename))
overwrite = Settings.inst_qus.ask_question('overwrite')
if overwrite:
MsgUser.warning(
"Erasing existing file %s" % local_filename)
try:
os.remove(local_filename)
except Exception:
raise DownloadError(
"Unabled to remove local file %s - remove"
" it and try again" % local_filename)
else:
raise DownloadError("Aborting download")
else:
raise DownloadError(
"There is a directory named %s "
"- cannot overwrite" % local_filename)
MsgUser.debug(
"Downloading to file %s "
"(this may take some time)." % (local_filename))
MsgUser.message(
"Downloading...")
downloaded = False
while downloaded is False:
try:
file_url = '/'.join(
(Settings.mirror.rstrip('/'), details['filename']))
download_file(
url=file_url,
localf=local_filename)
if (not skip_verify and
(details['checksum'] !=
file_checksum(local_filename, details['checksum_type']))):
raise DownloadError('Downloaded file fails checksum')
MsgUser.ok("File downloaded")
except DownloadFileError, e:
MsgUser.debug(str(e))
if Settings.mirror != Settings.main_mirror:
MsgUser.warning(
"Download from mirror failed, re-trying from "
"main FSL download site")
Settings.mirror = Settings.main_mirror
else:
raise DownloadError(str(e))
else:
downloaded = True
return (local_filename, version, details)
class DownloadError(Exception):
pass
def shell_config(shell, fsldir, skip_root=False):
MsgUser.debug("Building environment for %s" % (shell))
env_lines = ''
if shell == 'sh' or shell == 'bash':
if skip_root:
env_lines += '''if [ -x /usr/bin/id ]; then
if [ -z "$EUID" ]; then
# ksh and dash doesn't setup the EUID environment var
EUID=`id -u`
fi
fi
if [ "$EUID" != "0" ]; then
'''
env_lines += '''
# FSL Setup
FSLDIR=%s
PATH=${FSLDIR}/bin:${PATH}
export FSLDIR PATH
. ${FSLDIR}/etc/fslconf/fsl.sh
'''
if skip_root:
env_lines += '''fi'''
match = "FSLDIR="
replace = "FSLDIR=%s"
elif shell == 'csh' or shell == 'tcsh':
if skip_root:
env_lines += '''if ( $uid != 0 ) then
'''
env_lines += '''
# FSL Setup
setenv FSLDIR %s
setenv PATH ${FSLDIR}/bin:${PATH}
source ${FSLDIR}/etc/fslconf/fsl.csh
'''
if skip_root:
env_lines += '''
endif'''
match = "setenv FSLDIR"
replace = "setenv FSLDIR %s"
elif shell == 'matlab':
env_lines = '''
%% FSL Setup
setenv( 'FSLDIR', '%s' );
fsldir = getenv('FSLDIR');
fsldirmpath = sprintf('%%s/etc/matlab',fsldir);
path(path, fsldirmpath);
clear fsldir fsldirmpath;
'''
match = "setenv( 'FSLDIR',"
replace = "setenv( 'FSLDIR', '%s' );"
else:
raise ValueError("Unknown shell type %s" % shell)
return (env_lines % (fsldir), match, replace % (fsldir))
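# Example of what shell_config returns for bash with a hypothetical FSLDIR
# (a sketch, not part of the installer itself):
#   (lines, match, replace) = shell_config('bash', '/usr/local/fsl')
#   # 'lines' is the block to append to the profile, 'match' ("FSLDIR=") is the
#   # prefix used to find an existing entry, and 'replace' is the corrected
#   # "FSLDIR=/usr/local/fsl" line passed to line_starts_replace().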
def get_profile(shell):
home = os.path.expanduser("~")
dotprofile = os.path.join(home, '.profile')
if shell == 'bash':
profile = os.path.join(home, '.bash_profile')
if not os.path.isfile(profile) and os.path.isfile(dotprofile):
profile = dotprofile
elif shell == 'sh':
profile = dotprofile
else:
cshprofile = os.path.join(home, '.cshrc')
if shell == 'csh':
profile = cshprofile
elif shell == 'tcsh':
profile = os.path.join(home, '.tcshrc')
if not os.path.isfile(profile) and os.path.isfile(cshprofile):
profile = cshprofile
else:
raise ValueError("Unsupported shell")
return profile
class FixFslDirError(Exception):
pass
def fix_fsldir(shell, fsldir):
(_, match, replace) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug(
"Editing %s, replacing line beginning:%s with %s." %
(profile, match, replace))
try:
edit_file(profile, line_starts_replace, match, replace, False)
except EditFileError, e:
raise FixFslDirError(str(e))
class AddFslDirError(Exception):
pass
def add_fsldir(shell, fsldir):
(env_lines, _, _) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug("Adding %s to %s" % (env_lines, profile))
try:
add_to_file(profile, env_lines, False)
except AddToFileError, e:
raise AddFslDirError(str(e))
class ConfigureMatlabError(Exception):
pass
class ConfigureMatlabWarn(Exception):
pass
def configure_matlab(fsldir, m_startup='', c_file=True):
'''Setup your startup.m file to enable FSL MATLAB functions to work'''
(mlines, match, replace) = shell_config('matlab', fsldir)
if m_startup == '':
m_startup = os.path.join(
os.path.expanduser('~'), 'matlab', 'startup.m')
if os.path.exists(m_startup):
# Check if already configured
MsgUser.debug("Looking for %s in %s" % (match, m_startup))
if file_contains(m_startup, match):
try:
MsgUser.debug('Updating MATLAB startup file.')
edit_file(
m_startup, line_starts_replace,
match, replace, False)
except EditFileError, e:
raise ConfigureMatlabError(str(e))
else:
MsgUser.debug('Adding FSL settings to MATLAB.')
try:
add_to_file(m_startup, mlines, False)
except AddToFileError, e:
raise ConfigureMatlabError(str(e))
elif c_file:
# No startup.m file found. Create one
try:
MsgUser.debug('No MATLAB startup.m file found, creating one.')
if not os.path.isdir(os.path.dirname(m_startup)):
MsgUser.debug('No ~/matlab folder found, creating one.')
os.mkdir(os.path.dirname(m_startup))
create_file(m_startup, mlines, False)
except (OSError, CreateFileError), e:
MsgUser.debug(
'Unable to create ~/matlab folder or startup.m file,'
' cannot configure (%s).' % (str(e)))
raise ConfigureMatlabError(
"Unable to create your ~/matlab folder or startup.m, "
"so cannot configure MATLAB for FSL.")
else:
MsgUser.debug('MATLAB may not be installed, doing nothing.')
raise ConfigureMatlabWarn("I can't tell if you have MATLAB installed.")
class SetupEnvironmentError(Exception):
pass
class SetupEnvironmentSkip(Exception):
pass
def setup_system_environment(fsldir):
'''Add a system-wide profile setting up FSL for all users.
Only supported on Redhat/Centos'''
profile_d = '/etc/profile.d'
profile_files = ['fsl.sh', 'fsl.csh']
exceptions = []
skips = []
if os.getuid() != 0:
sudo = True
else:
sudo = False
if os.path.isdir(profile_d):
for profile in profile_files:
pf = profile.split('.')[1]
(lines, match, replace) = shell_config(pf, fsldir)
this_profile = os.path.join(profile_d, profile)
if os.path.exists(this_profile):
# Already has a profile file
# Does it contain an exact match for current FSLDIR?
match = file_contains_1stline(this_profile, replace)
if match != '':
# If there is an fsl.(c)sh then just fix
# the entry for FSLDIR
MsgUser.debug(
"Fixing %s for FSLDIR location." % (this_profile))
try:
edit_file(
this_profile, line_starts_replace,
match, replace, sudo)
except EditFileError, e:
exceptions.append(str(e))
else:
# No need to do anything
MsgUser.debug(
"%s already configured - skipping." %
(this_profile))
skips.append(profile)
else:
# Create the file
try:
create_file(this_profile, lines, sudo)
except CreateFileError, e:
exceptions.append(str(e))
else:
raise SetupEnvironmentError(
"No system-wide configuration folder found - Skipped")
if exceptions:
raise SetupEnvironmentError(".".join(exceptions))
if skips:
raise SetupEnvironmentSkip(".".join(skips))
def setup_environment(fsldir=None, system=False, with_matlab=False):
'''Setup the user's environment so that their
terminal finds the FSL tools etc.'''
# Check for presence of profile file:
if fsldir is None:
fsldir = get_fsldir()
user_shell = which_shell()
MsgUser.debug("User's shell is %s" % (user_shell))
try:
(profile_lines, _, _) = shell_config(user_shell, fsldir)
profile = get_profile(user_shell)
except ValueError, e:
raise SetupEnvironmentError(str(e))
cfile = False
if not os.path.isfile(profile):
MsgUser.debug("User is missing a shell setup file.")
cfile = True
if cfile:
MsgUser.debug("Creating file %s" % (profile))
try:
create_file(profile, profile_lines, False)
except CreateFileError, e:
raise SetupEnvironmentError(
"Unable to create profile %s" % (profile))
else:
# Check if user already has FSLDIR set
MsgUser.message("Setting up FSL software...")
try:
if file_contains(profile, "FSLDIR"):
MsgUser.debug("Updating FSLDIR entry.")
fix_fsldir(user_shell, fsldir)
else:
MsgUser.debug("Adding FSLDIR entry.")
add_fsldir(user_shell, fsldir)
except (AddFslDirError, FixFslDirError), e:
raise SetupEnvironmentError(
"Unable to update your profile %s"
" with FSL settings" % (profile))
if with_matlab:
MsgUser.debug("Setting up MATLAB")
try:
configure_matlab(fsldir)
except ConfigureMatlabError, e:
MsgUser.debug(str(e))
raise SetupEnvironmentError(str(e))
except ConfigureMatlabWarn, e:
MsgUser.skipped(str(e))
class PostInstallError(Exception):
pass
class InstallArchiveError(Exception):
pass
class UnknownArchiveType(Exception):
pass
def archive_type(archive):
'''Determine file type based on extension and check
that file looks like this file type'''
archive_types = {
'gzip': ('tar', '-z'),
'bzip2': ('tar', '-j'),
'zip': ('zip', ''), }
try:
file_type = run_cmd("file %s" % (archive))
except RunCommandError, e:
raise UnknownArchiveType(str(e))
file_type = file_type.lower()
for f_type in ('gzip', 'bzip2', 'zip', ):
if f_type in file_type:
return archive_types[f_type]
raise UnknownArchiveType(archive)
def post_install(
fsldir, settings, script="post_install.sh", quiet=False,
app_links=False, x11=False):
MsgUser.message("Performing post install tasks")
if is_writeable(fsldir):
as_root = False
elif is_writeable_as_root(fsldir):
as_root = True
else:
raise PostInstallError(
"Unable to write to target folder (%s)" % (fsldir))
install_installer(fsldir)
script_path = os.path.join(fsldir, Settings.post_inst_dir, script)
if x11:
try:
check_X11(settings.x11)
except CheckX11Warning, e:
MsgUser.warning(str(e))
else:
MsgUser.ok("X11 (required for GUIs) found")
if os.path.exists(script_path):
MsgUser.debug("Found post-install script %s" % (script_path))
if not os.access(script_path, os.X_OK):
raise PostInstallError(
"Unable to run post install script %s" % (script_path)
)
script_opts = '-f "%s"' % (fsldir)
if quiet:
script_opts += " -q"
command_line = " ".join((script_path, script_opts))
try:
run_cmd_displayoutput(command_line, as_root=as_root)
except RunCommandError, e:
raise PostInstallError(
"Error running post installation script (error %s)"
" - check the install log" % (str(e))
)
# Work around for mistake in 5.0.10 post setup script
mal = os.path.join(
fsldir, Settings.post_inst_dir,
'make_applications_links.sh')
if (os.path.exists(mal) and
not file_contains(script_path, "make_applications_links.sh")):
MsgUser.debug(
"Work around necessary for missing app link creation")
else:
app_links = False
if app_links:
try:
make_applications_links(fsldir, settings.applications)
except MakeApplicationLinksError, e:
for message in e.app_messages.values():
MsgUser.warning(message)
else:
MsgUser.ok("/Applications links created/updated")
MsgUser.ok("Post installation setup complete")
def install_archive(archive, fsldir=None):
def clean_up_temp():
try:
safe_delete(tempfolder, as_root)
except SafeDeleteError, sd_e:
MsgUser.debug(
"Unable to clean up temporary folder! "
"%s" % (str(sd_e)))
if not os.path.isfile(archive):
raise InstallError("%s isn't a file" % (archive))
if not fsldir:
try:
fsldir = get_fsldir(specified_dir=fsldir, install=True)
except GetFslDirError, e:
raise InstallError(str(e))
MsgUser.debug("Requested install of %s as %s" % (archive, fsldir))
if os.path.exists(fsldir):
# move old one out of way
MsgUser.debug("FSL version already installed")
keep_old = Settings.inst_qus.ask_question('del_old')
else:
keep_old = False
install_d = os.path.dirname(fsldir)
MsgUser.debug("Checking %s is writeable." % (install_d))
if is_writeable(install_d):
as_root = False
elif is_writeable_as_root(install_d):
as_root = True
else:
raise InstallArchiveError(
"Unable to write to target folder (%s), "
"even as a super user." % (install_d))
MsgUser.debug("Does %s require root for deletion? %s" % (
install_d, as_root))
try:
unarchive, ua_option = archive_type(archive)
except UnknownArchiveType, e:
raise InstallArchiveError(str(e))
# Generate a temporary name - eg fsl-<mypid>-date
tempname = '-'.join(('fsl', str(os.getpid()), str(time.time())))
tempfolder = os.path.join(install_d, tempname)
try:
run_cmd_dropstdout("mkdir %s" % (tempfolder), as_root=as_root)
except RunCommandError, e:
raise InstallArchiveError(
"Unable to create folder to install into.")
MsgUser.debug(
"Unpacking %s into folder %s." % (archive, tempfolder))
try:
if unarchive == 'tar':
unpack_cmd = 'tar -C %s -x %s -o -f %s' % (
tempfolder, ua_option, archive)
elif unarchive == 'zip':
MsgUser.debug(
"Calling unzip %s %s" % (ua_option, archive)
)
unpack_cmd = 'unzip %s %s' % (ua_option, archive)
try:
run_cmd_dropstdout(unpack_cmd, as_root=as_root)
except RunCommandError, e:
raise InstallArchiveError("Unable to unpack FSL.")
new_fsl = os.path.join(tempfolder, 'fsl')
if os.path.exists(fsldir):
# move old one out of way
try:
old_version = get_installed_version(fsldir)
except (NotAFslVersion, GetInstalledVersionError), e:
old_version = Version('0.0.0')
if keep_old:
MsgUser.warning(
"The contents of %s doesn't look like an "
"FSL installation! - "
"moving to fsl-0.0.0" % (fsldir))
old_fsl = '-'.join((fsldir, str(old_version)))
if os.path.exists(old_fsl):
MsgUser.debug(
"Looks like there is another copy of the "
"old version of FSL - deleting...")
try:
safe_delete(old_fsl, as_root)
except SafeDeleteError, e:
raise InstallError(
";".join((
"Install location already has a "
"%s - I've tried to delete it but"
" failed" % (old_fsl), str(e))))
if keep_old:
try:
MsgUser.debug(
"Moving %s to %s" % (fsldir, old_fsl))
move(fsldir, old_fsl, as_root)
MsgUser.message(
'''You can find your archived version of FSL in %s.
If you wish to restore it, remove %s and rename %s to %s''' % (
old_fsl, fsldir, old_fsl, fsldir))
except MoveError, mv_e:
# failed to move the old version
MsgUser.debug(
"Failed to move old version "
"- %s" % (str(mv_e)))
raise InstallError(
"Failed to backup old version (%s)" % (str(mv_e)))
else:
MsgUser.debug("Removing existing FSL install")
try:
safe_delete(fsldir, as_root)
MsgUser.debug("Deleted %s." % (fsldir))
except SafeDeleteError, e:
raise InstallError(
"Failed to delete %s - %s." % (fsldir, str(e)))
else:
old_fsl = ''
try:
MsgUser.debug("Moving %s to %s" % (new_fsl, fsldir))
move(new_fsl, fsldir, as_root)
except MoveError, e:
# Unable to move new install into place
MsgUser.debug(
"Move failed - %s." % (str(e)))
raise InstallError(
'Failed to move new version into place.')
except InstallError, e:
clean_up_temp()
raise InstallArchiveError(str(e))
clean_up_temp()
MsgUser.debug("Install complete")
MsgUser.ok("FSL software installed.")
return fsldir
def check_for_updates(url, fsldir, requested_v=None):
# Start an update
MsgUser.message("Looking for new version.")
try:
this_version = get_installed_version(fsldir)
except GetInstalledVersionError, e:
# We can't find an installed version of FSL!
raise InstallError(str(e))
else:
MsgUser.debug("You have version %s" % (this_version))
if not requested_v:
version = Version(latest_release(url)['version'])
else:
try:
version = Version(requested_v)
except NotAFslVersion:
raise InstallError(
"%s doesn't look like a version" % requested_v)
if version > this_version:
# Update Available
if version.major > this_version.major:
# We don't support patching between major
# versions so download a fresh copy
return (UPGRADE, version)
else:
return (UPDATE, version)
else:
return (CURRENT, None)
class MakeApplicationLinksError(Exception):
def __init__(self, *args):
super(MakeApplicationLinksError, self).__init__(*args)
try:
self.app_messages = args[0]
except IndexError:
self.app_messages = []
def make_applications_links(fsldir, apps):
'''Create symlinks in /Applications'''
MsgUser.message("Creating Application links...")
results = {}
for app in apps:
app_location = os.path.join('/Applications', os.path.basename(app))
app_target = os.path.join(fsldir, app)
create_link = True
MsgUser.debug("Looking for existing link %s" % (app_location))
if os.path.lexists(app_location):
MsgUser.debug(
"Is a link: %s; realpath: %s" % (
os.path.islink(app_location),
os.path.realpath(app_location)))
if os.path.islink(app_location):
MsgUser.debug("A link already exists.")
if os.path.realpath(app_location) != app_target:
MsgUser.debug(
"Deleting old (incorrect) link %s" % (app_location))
try:
run_cmd_dropstdout("rm " + app_location, as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to remove broken"
" link to %s (%s)." % (app_target, str(e)))
results[app] = 'Unable to remove broken link to %s' % (
app_target)
create_link = False
else:
MsgUser.debug("Link is correct, skipping.")
create_link = False
else:
MsgUser.debug(
"%s doesn't look like a symlink, "
"so let's not delete it." % (app_location))
results[app] = (
"%s is not a link so hasn't been updated to point at the "
"new FSL install.") % (app_location)
create_link = False
if create_link:
MsgUser.debug('Create a link for %s' % (app))
if os.path.exists(app_target):
try:
run_cmd_dropstdout(
"ln -s %s %s" % (app_target, app_location),
as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to create link to %s (%s)." % (
app_target, str(e)))
results[app] = (
'Unable to create link to %s.') % (app_target)
else:
MsgUser.debug(
'Unable to find application'
' %s to link to.' % (app_target))
if results:
raise MakeApplicationLinksError(results)
class CheckX11Warning(Exception):
pass
def check_X11(x11):
'''Function to find X11 install on Mac OS X and confirm it is compatible.
Advise user to download Xquartz if necessary'''
MsgUser.message(
"Checking for X11 windowing system (required for FSL GUIs).")
xbin = ''
for x in x11['apps']:
if os.path.exists(os.path.join(x11['location'], x)):
xbin = x
if xbin != '':
# Find out what version is installed
x_v_cmd = [
'/usr/bin/mdls', '-name',
'kMDItemVersion', os.path.join(x11['location'], xbin)]
try:
cmd = Popen(x_v_cmd, stdout=PIPE, stderr=STDOUT)
(vstring, _) = cmd.communicate()
except Exception, e:
raise CheckX11Warning(
"Unable to check X11 version (%s)" % (str(e)))
if cmd.returncode:
MsgUser.debug("Error finding the version of X11 (%s)" % (vstring))
# App found, but can't tell version, warn the user
raise CheckX11Warning(
"X11 (required for FSL GUIs) is installed but I"
" can't tell what the version is.")
else:
# Returns:
# kMDItemVersion = "2.3.6"\n
(_, _, version) = vstring.strip().split()
if version.startswith('"'):
version = version[1:-1]
if version in x11['bad_versions']:
raise CheckX11Warning(
"X11 (required for FSL GUIs) is a version that"
" is known to cause problems. We suggest you"
" upgrade to the latest XQuartz release from "
"%s" % (x11['download_url']))
else:
MsgUser.debug(
"X11 found and is not a bad version"
" (%s: %s)." % (xbin, version))
else:
# No X11 found, warn the user
raise CheckX11Warning(
"The FSL GUIs require the X11 window system which I can't"
" find in the usual places. You can download a copy from %s"
" - you will need to install this before the GUIs will"
" function" % (x11['download_url']))
def do_install(options, settings):
MsgUser.message(
shell_colours.bold + settings.title + shell_colours.default)
if options.test_installer:
settings.main_mirror = options.test_installer
this_computer = Host
if not this_computer.supported:
MsgUser.debug("Unsupported host %s %s %s" % (
this_computer.o_s,
this_computer.arch,
this_computer.os_type))
raise InstallError(
"Unsupported host - you could try building from source")
if this_computer.o_s == "linux":
system_environment = True
with_matlab = False
application_links = False
x11 = False
elif this_computer.o_s == "darwin":
system_environment = False
with_matlab = True
application_links = True
x11 = True
else:
MsgUser.debug("Unrecognised OS %s" % (this_computer.o_s))
raise InstallError("Unrecognised OS")
my_uid = os.getuid()
def configure_environment(fsldir, env_all=False, skip=False, matlab=False):
if skip:
return
if env_all:
if system_environment:
# Setup the system-wise environment
try:
setup_system_environment(fsldir)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(
"Failed to configure system-wide profiles "
"with FSL settings: %s" % (str(e)))
except SetupEnvironmentSkip, e:
MsgUser.skipped(
"Some shells already configured: %s" % (str(e)))
else:
MsgUser.debug("System-wide profiles setup.")
MsgUser.ok("System-wide FSL configuration complete.")
else:
MsgUser.skipped(
"System-wide profiles not supported on this OS")
elif my_uid != 0:
# Setup the environment for the current user
try:
setup_environment(fsldir, with_matlab=matlab)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(str(e))
else:
MsgUser.ok(
"User profile updated with FSL settings, you will need "
"to log out and back in to use the FSL tools.")
if my_uid != 0:
if options.quiet:
settings.inst_qus.defaults = True
print '''
We may need administrator rights, but you have specified fully automated
mode - you may still be asked for an admin password if required.'''
print '''
To install in a fully automated manner, either ensure this is running as the root
user (use sudo) or that you can write to the folder you wish to install
FSL in.'''
elif (not options.download and
not options.list_versions and
not options.get_source and
not options.get_feeds):
MsgUser.warning(
'''Some operations of the installer require administrative rights,
for example installing into the default folder of /usr/local.
If your account is an 'Administrator' (you have 'sudo' rights)
then you will be prompted for your administrator password
when necessary.''')
if not options.d_dir and options.quiet:
raise InstallError(
"Quiet mode requires you to specify the install location"
" (e.g. /usr/local)")
if not options.quiet and not options.list_versions:
MsgUser.message(
"When asked a question, the default answer is given in square "
"brackets.\nHit the Enter key to accept this default answer.")
if options.env_only and my_uid != 0:
configure_environment(
get_fsldir(specified_dir=options.d_dir),
options.env_all)
return
if options.archive:
if not options.skipchecksum:
if not options.checksum:
raise InstallError(
"No checksum provided and checking not disabled")
else:
checksummer = globals()[options.checksum_type + 'File']
if options.checksum != checksummer(options.archive):
raise InstallError("FSL archive doesn't match checksum")
else:
MsgUser.ok("FSL Package looks good")
arc_version = archive_version(options.archive)
MsgUser.message(
"Installing FSL software version %s..." % (arc_version))
fsldir = install_archive(
archive=options.archive, fsldir=options.d_dir)
try:
post_install(fsldir=fsldir, settings=settings, quiet=options.quiet)
except PostInstallError, e:
raise InstallError(str(e))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
return
# All the following options require the Internet...
try:
settings.mirror = fastest_mirror(
settings.mirrors, settings.mirrors_file)
except SiteNotResponding, e:
# We can't find the FSL site - possibly the internet is down
raise InstallError(e)
try:
self_update(settings.mirror)
except SelfUpdateError, e:
MsgUser.debug("Self update error: %s" % (str(e)))
MsgUser.warning("Error checking for updates to installer - continuing")
if options.list_versions:
# Download a list of available downloads from the webserver
list_releases(settings.mirror)
return
if options.download:
MsgUser.debug("Attempting to download latest release")
try:
download_release(request_version=options.requestversion,
skip_verify=options.skipchecksum)
except DownloadFileError, e:
raise("Unable to download release %s" % (str(e)))
return
if options.update:
fsldir = get_fsldir()
status, new_v = check_for_updates(settings.mirror, fsldir=fsldir)
if status == UPDATE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('update'):
return
elif status == UPGRADE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('upgrade'):
return
else:
MsgUser.ok("FSL is up-to-date.")
return
if options.get_source:
MsgUser.debug("Attempting to download source")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
source_code=True)
except DownloadFileError, e:
raise("Unable to download source code %s" % (str(e)))
return
if options.get_feeds:
MsgUser.debug("Attempting to download FEEDS")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
feeds=True)
except DownloadFileError, e:
raise("Unable to download FEEDS %s" % (str(e)))
return
try:
(version, details) = get_web_version_and_details(
request_version=options.requestversion
)
if 'redirect' in details:
MsgUser.message("Please download FSL using the instructions here:")
MsgUser.message("%s" % (details['redirect']))
return
fsldir = get_fsldir(specified_dir=options.d_dir, install=True)
reinstall = True
if os.path.exists(fsldir):
inst_version = get_installed_version(fsldir)
if inst_version == version:
reinstall = Settings.inst_qus.ask_question('version_match')
if reinstall:
(fname, version, details) = download_release(
to_temp=True,
request_version=options.requestversion,
skip_verify=options.skipchecksum)
if not details['supported']:
MsgUser.debug(
"This OS is not officially supported -"
" you may experience issues"
)
MsgUser.debug(
"Installing %s from %s (details: %s)" % (
fname, version, details))
MsgUser.message(
"Installing FSL software version %s..." % (version))
install_archive(
archive=fname, fsldir=fsldir)
try:
safe_delete(fname)
except SafeDeleteError, e:
MsgUser.debug(
"Unable to delete downloaded package %s ; %s" % (
fname, str(e)))
if details['notes']:
MsgUser.message(details['notes'])
try:
post_install(
fsldir=fsldir, settings=settings,
quiet=options.quiet, x11=x11,
app_links=application_links)
except PostInstallError, e:
raise InstallError(str(e))
except DownloadError, e:
MsgUser.debug("Unable to download FSL %s" % (str(e)))
raise InstallError("Unable to download FSL")
except InstallArchiveError, e:
MsgUser.debug("Unable to unpack FSL ; %s" % (str(e)))
raise InstallError("Unable to unpack FSL - %s" % (str(e)))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
if details['notes']:
MsgUser.message(details['notes'])
def parse_options(args):
usage = "usage: %prog [options]"
ver = "%%prog %s" % (version)
parser = OptionParser(usage=usage, version=ver)
parser.add_option("-d", "--dest", dest="d_dir",
help="Install into folder given by DESTDIR - "
"e.g. /usr/local/fsl",
metavar="DESTDIR", action="store",
type="string")
parser.add_option("-e", dest="env_only",
help="Only setup/update your environment",
action="store_true")
parser.add_option("-E", dest="env_all",
help="Setup/update the environment for ALL users",
action="store_true")
parser.add_option("-v", help="Print version number and exit",
action="version")
parser.add_option("-c", "--checkupdate", dest='update',
help="Check for FSL updates -"
" needs an internet connection",
action="store_true")
parser.add_option("-o", "--downloadonly", dest="download",
help=SUPPRESS_HELP,
action="store_true")
advanced_group = OptionGroup(
parser, "Advanced Install Options",
"These are advanced install options")
advanced_group.add_option(
"-l", "--listversions", dest="list_versions",
help="List available versions of FSL",
action="store_true")
advanced_group.add_option(
"-V", "--fslversion", dest="requestversion",
help="Download the specific version FSLVERSION of FSL",
metavar="FSLVERSION", action="store",
type="string")
advanced_group.add_option(
"-s", "--source", dest="get_source",
help="Download source code for FSL",
action="store_true")
advanced_group.add_option(
"-F", "--feeds", dest="get_feeds",
help="Download FEEDS",
action="store_true")
advanced_group.add_option(
"-q", "--quiet", dest='quiet',
help="Silence all messages - useful if scripting install",
action="store_true")
advanced_group.add_option(
"-p", dest="skip_env",
help="Don't setup the environment",
action="store_true")
parser.add_option_group(advanced_group)
debug_group = OptionGroup(
parser, "Debugging Options",
"These are for use if you have a problem running this installer.")
debug_group.add_option(
"-f", "--file", dest="archive",
help="Install a pre-downloaded copy of the FSL archive",
metavar="ARCHIVEFILE", action="store",
type="string")
debug_group.add_option(
"-C", "--checksum", dest="checksum",
help="Supply the expected checksum for the pre-downloaded FSL archive",
metavar="CHECKSUM", action="store",
type="string")
debug_group.add_option(
"-T", "--checksum-type", dest="checksum_type",
default="sha256",
help="Specify the type of checksum",
action="store",
type="string")
debug_group.add_option(
"-M", "--nochecksum", dest="skipchecksum",
help="Don't check the pre-downloaded FSL archive",
action="store_true")
debug_group.add_option(
"-D", dest="verbose",
help="Switch on debug messages",
action="store_true")
debug_group.add_option(
"-G", dest="test_installer",
help=SUPPRESS_HELP,
action="store",
type="string")
debug_group.add_option(
"-w", dest="test_csv",
help=SUPPRESS_HELP,
action="store_true"
)
parser.add_option_group(debug_group)
return parser.parse_args(args)
if __name__ == '__main__':
(options, args) = parse_options(sys.argv[1:])
if options.verbose:
MsgUser.debugOn()
print options
if options.quiet:
MsgUser.quietOn()
if options.test_csv:
HAS_JSON = False
installer_settings = Settings()
try:
do_install(options, installer_settings)
except BadVersion, e:
MsgUser.debug(str(e))
MsgUser.failed("Unable to find requested version!")
sys.exit(1)
except (InstallError, GetFslDirError, GetInstalledVersionError), e:
MsgUser.failed(str(e))
sys.exit(1)
except UnsupportedOs, e:
MsgUser.failed(str(e))
sys.exit(1)
except KeyboardInterrupt, e:
MsgUser.message('')
MsgUser.failed("Install aborted.")
sys.exit(1)
|
py | 7dfe54de2f4aa8baf400778f44cf5c6587709754 | import glob
import os
from pathlib import Path
import tarfile
import numpy as np
import pyarrow as pa
import pyarrow.csv as pcsv
import pyarrow.parquet as pq
from scipy import io
import pandas as pd
from get_12ECG_features import get_12ECG_features
# Find unique classes.
def get_classes(input_directory, filenames, static=True):
if static:
class_path = os.path.join(Path(input_directory).parents[1], "dx_mapping_scored.csv")
#class_matrix = pcsv.read_csv(class_path).to_pandas()
class_matrix = pd.read_csv(class_path)
classes = class_matrix["SNOMED CT Code"].astype(str)
return list(set(classes))
else:
classes = set()
for filename in filenames:
with open(filename, 'r') as f:
for l in f:
if l.startswith('#Dx'):
tmp = l.split(': ')[1].split(',')
for c in tmp:
classes.add(c.strip())
return sorted(classes)
# Load challenge data.
def load_challenge_data(header_file):
with open(header_file, 'r') as f:
header = f.readlines()
mat_file = header_file.replace('.hea', '.mat')
x = io.loadmat(mat_file)
recording = np.asarray(x['val'], dtype=np.float64)
return recording, header
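# The block below builds the processed datasets: for each split (train/validation/test)
# and each raw data folder it loads every header/.mat recording pair, extracts per-lead
# statistical features, maps the first scored Dx code to a class index (-1 if unscored),
# drops the unscored rows, and writes label + features to one parquet file per folder.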
project_path = Path(__file__).parents[2]
input_directory = project_path / "data/raw"
output_directory = project_path / "data/processed"
folders = ["train", "validation", "test"]
for folder in folders:
input_directory_path = os.path.join(input_directory, folder)
output_directory_path = os.path.join(output_directory, folder)
    # make sure the output path exists; create it if not
Path(output_directory_path).mkdir(exist_ok=True)
    # loop over the raw data folders
for input_folder in glob.glob(f"{input_directory_path}/*"):
folder_name = input_folder.split(os.path.sep)[-1]
print(f"processing {folder_name} data..")
header_files = []
for f in os.listdir(input_folder):
g = os.path.join(input_folder, f)
if not f.lower().startswith('.') and f.lower().endswith('hea') and os.path.isfile(g):
header_files.append(g)
classes = get_classes(input_folder, header_files)
num_classes = len(classes)
num_files = len(header_files)
recordings = list()
headers = list()
for i in range(num_files):
recording, header = load_challenge_data(header_files[i])
recordings.append(recording)
headers.append(header)
features = list()
labels = list()
label = list()
for i in range(num_files):
recording = recordings[i]
header = headers[i]
tmp, num_leads = get_12ECG_features(recording, header)
features.append(tmp)
for l in header:
if l.startswith('#Dx:'):
label = list()
arrs = l.strip().split(' ')
for arr in arrs[1].split(','):
                        # if label not in our labels
if arr.rstrip() not in classes:
label = -1
continue
else:
label = classes.index(arr.rstrip())
break # Only use first positive index
labels.append(label)
features = np.array(features)
labels = np.array(labels)
        # filter out rows whose label is not in our class list
other_class_mask = labels != -1
features = features[other_class_mask]
labels = labels[other_class_mask]
feature_list = ["age","sex","mean_RR", "mean_Peaks", "median_RR", "median_Peaks", "std_RR", "std_Peaks", "var_RR", "var_Peaks", "skew_RR", "skew_Peaks", "kurt_RR", "kurt_Peaks"]
        # build the schema fields and value arrays dynamically in a loop
fields = [
('label', pa.int64()),
]
table_arrays = [
pa.array(labels),
]
ix = 0
for l in range(num_leads):
for f in feature_list:
fields += (f'lead{l+1}_{f}', pa.float32()),
table_arrays += pa.array(features[:, ix]),
ix += 1
# create parquet objects
schema = pa.schema(fields)
table = pa.Table.from_arrays(
table_arrays,
schema=schema,
)
print(f"writing {folder_name} data..")
        # write the concatenated data to parquet
output_path_labels = os.path.join(output_directory_path, f"processed_{folder_name}.parquet")
pq.write_table(table, where=output_path_labels)
|
py | 7dfe56ae21a693f1ffe97eaea31e4a141d03a816 | from __future__ import unicode_literals
from __future__ import print_function
import click
import getpass
import os
import sys
import platform
from builtins import input
from mssqlcli.config import config_location
from mssqlcli.__init__ import __version__
click.disable_unicode_literals_warning = True
try:
from urlparse import urlparse, unquote, parse_qs
except ImportError:
from urllib.parse import urlparse, unquote, parse_qs
from mssqlcli.mssqlclioptionsparser import create_parser
import mssqlcli.telemetry as telemetry_session
MSSQLCLI_TELEMETRY_PROMPT = """
Telemetry
---------
By default, mssql-cli collects usage data in order to improve your experience.
The data is anonymous and does not include commandline argument values.
The data is collected by Microsoft.
Disable telemetry collection by setting environment variable MSSQL_CLI_TELEMETRY_OPTOUT to 'True' or '1'.
Microsoft Privacy statement: https://privacy.microsoft.com/privacystatement
"""
def run_cli_with(options):
if create_config_dir_for_first_use():
display_telemetry_message()
display_version_message(options)
configure_and_update_options(options)
# Importing MssqlCli creates a config dir by default.
# Moved import here so we can create the config dir for first use prior.
from mssqlcli.mssql_cli import MssqlCli
mssqlcli = MssqlCli(options)
mssqlcli.connect_to_database()
telemetry_session.set_server_information(mssqlcli.mssqlcliclient_main)
mssqlcli.run()
def configure_and_update_options(options):
if options.dac_connection and options.server and not options.server.lower().startswith("admin:"):
options.server = "admin:" + options.server
if not options.integrated_auth:
if not options.username:
options.username = input(u'Username (press enter for sa):') or u'sa'
if not options.password:
options.password = getpass.getpass()
def create_config_dir_for_first_use():
config_dir = os.path.dirname(config_location())
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return True
return False
def display_version_message(options):
if options.version:
print('Version:', __version__)
sys.exit(0)
def display_telemetry_message():
print(MSSQLCLI_TELEMETRY_PROMPT)
if __name__ == "__main__":
try:
telemetry_session.start()
mssqlcli_options_parser = create_parser()
mssqlcli_options = mssqlcli_options_parser.parse_args(sys.argv[1:])
run_cli_with(mssqlcli_options)
finally:
# Upload telemetry async in a separate process.
telemetry_session.conclude()
|
py | 7dfe56b83388ef31a61f23c8075081715557a41f | # -*- coding: utf-8 -*-
'''
Directly manage the Salt fileserver plugins
'''
from __future__ import absolute_import
# Import Salt libs
import salt.utils
import salt.fileserver
import salt.output
def envs(backend=None, sources=False, outputter=None):
'''
Return the available fileserver environments. If no backend is provided,
then the environments for all configured backends will be returned.
backend
Narrow fileserver backends to a subset of the enabled ones.
.. versionchanged:: 2015.5.0
If all passed backends start with a minus sign (``-``), then these
backends will be excluded from the enabled backends. However, if
there is a mix of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus
sign will be disregarded.
Additionally, fileserver backends can now be passed as a
comma-separated list. In earlier versions, they needed to be passed
as a python list (ex: ``backend="['roots', 'git']"``)
CLI Example:
.. code-block:: bash
salt-run fileserver.envs
salt-run fileserver.envs backend=roots,git
salt-run fileserver.envs git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
output = fileserver.envs(back=backend, sources=sources)
if outputter:
salt.utils.warn_until(
'Boron',
'The \'outputter\' argument to the fileserver.envs runner has '
'been deprecated. Please specify an outputter using --out. '
'See the output of \'salt-run -h\' for more information.'
)
return {'outputter': outputter, 'data': output}
else:
return output
def file_list(saltenv='base', backend=None, outputter=None):
'''
Return a list of files from the salt fileserver
saltenv : base
The salt fileserver environment to be listed
backend
Narrow fileserver backends to a subset of the enabled ones. If all
passed backends start with a minus sign (``-``), then these backends
will be excluded from the enabled backends. However, if there is a mix
of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus sign will
be disregarded.
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt-run fileserver.file_list
salt-run fileserver.file_list saltenv=prod
salt-run fileserver.file_list saltenv=dev backend=git
salt-run fileserver.file_list base hg,roots
salt-run fileserver.file_list -git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
load = {'saltenv': saltenv, 'fsbackend': backend}
output = fileserver.file_list(load=load)
if outputter:
salt.utils.warn_until(
'Boron',
'The \'outputter\' argument to the fileserver.file_list runner '
'has been deprecated. Please specify an outputter using --out. '
'See the output of \'salt-run -h\' for more information.'
)
return {'outputter': outputter, 'data': output}
else:
return output
def symlink_list(saltenv='base', backend=None, outputter=None):
'''
Return a list of symlinked files and dirs
saltenv : base
The salt fileserver environment to be listed
backend
Narrow fileserver backends to a subset of the enabled ones. If all
passed backends start with a minus sign (``-``), then these backends
will be excluded from the enabled backends. However, if there is a mix
of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus sign will
be disregarded.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run fileserver.symlink_list
salt-run fileserver.symlink_list saltenv=prod
salt-run fileserver.symlink_list saltenv=dev backend=git
salt-run fileserver.symlink_list base hg,roots
salt-run fileserver.symlink_list -git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
load = {'saltenv': saltenv, 'fsbackend': backend}
output = fileserver.symlink_list(load=load)
if outputter:
salt.utils.warn_until(
'Boron',
'The \'outputter\' argument to the fileserver.symlink_list '
'runner has been deprecated. Please specify an outputter using '
'--out. See the output of \'salt-run -h\' for more information.'
)
return {'outputter': outputter, 'data': output}
else:
return output
def dir_list(saltenv='base', backend=None, outputter=None):
'''
Return a list of directories in the given environment
saltenv : base
The salt fileserver environment to be listed
backend
Narrow fileserver backends to a subset of the enabled ones. If all
passed backends start with a minus sign (``-``), then these backends
will be excluded from the enabled backends. However, if there is a mix
of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus sign will
be disregarded.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt-run fileserver.dir_list
salt-run fileserver.dir_list saltenv=prod
salt-run fileserver.dir_list saltenv=dev backend=git
salt-run fileserver.dir_list base hg,roots
salt-run fileserver.dir_list -git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
load = {'saltenv': saltenv, 'fsbackend': backend}
output = fileserver.dir_list(load=load)
if outputter:
salt.utils.warn_until(
'Boron',
'The \'outputter\' argument to the fileserver.dir_list runner '
'has been deprecated. Please specify an outputter using --out. '
'See the output of \'salt-run -h\' for more information.'
)
return {'outputter': outputter, 'data': output}
else:
return output
def empty_dir_list(saltenv='base', backend=None, outputter=None):
'''
.. versionadded:: 2015.5.0
Return a list of empty directories in the given environment
saltenv : base
The salt fileserver environment to be listed
backend
Narrow fileserver backends to a subset of the enabled ones. If all
passed backends start with a minus sign (``-``), then these backends
will be excluded from the enabled backends. However, if there is a mix
of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus sign will
be disregarded.
.. note::
Some backends (such as :mod:`git <salt.fileserver.gitfs>` and
:mod:`hg <salt.fileserver.hgfs>`) do not support empty directories.
So, passing ``backend=git`` or ``backend=hg`` will result in an
empty list being returned.
CLI Example:
.. code-block:: bash
salt-run fileserver.empty_dir_list
salt-run fileserver.empty_dir_list saltenv=prod
salt-run fileserver.empty_dir_list backend=roots
'''
fileserver = salt.fileserver.Fileserver(__opts__)
load = {'saltenv': saltenv, 'fsbackend': backend}
output = fileserver.file_list_emptydirs(load=load)
if outputter:
salt.utils.warn_until(
'Boron',
'The \'outputter\' argument to the fileserver.empty_dir_list '
'runner has been deprecated. Please specify an outputter using '
'--out. See the output of \'salt-run -h\' for more information.'
)
return {'outputter': outputter, 'data': output}
else:
return output
def update(backend=None):
'''
Update the fileserver cache. If no backend is provided, then the cache for
all configured backends will be updated.
backend
Narrow fileserver backends to a subset of the enabled ones.
.. versionchanged:: 2015.5.0
If all passed backends start with a minus sign (``-``), then these
backends will be excluded from the enabled backends. However, if
there is a mix of backends with and without a minus sign (ex:
``backend=-roots,git``) then the ones starting with a minus
sign will be disregarded.
Additionally, fileserver backends can now be passed as a
comma-separated list. In earlier versions, they needed to be passed
as a python list (ex: ``backend="['roots', 'git']"``)
CLI Example:
.. code-block:: bash
salt-run fileserver.update
salt-run fileserver.update backend=roots,git
'''
fileserver = salt.fileserver.Fileserver(__opts__)
fileserver.update(back=backend)
return True
def clear_cache(backend=None):
'''
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
'''
fileserver = salt.fileserver.Fileserver(__opts__)
cleared, errors = fileserver.clear_cache(back=backend)
ret = {}
if cleared:
ret['cleared'] = cleared
if errors:
ret['errors'] = errors
if not ret:
ret = 'No cache was cleared'
salt.output.display_output(ret, 'nested', opts=__opts__)
def clear_lock(backend=None, remote=None):
'''
.. versionadded:: 2015.5.0
Clear the fileserver update lock from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). This should only need to be done if a fileserver
update was interrupted and a remote is not updating (generating a warning
in the Master's log file). Executing this runner with no arguments will
remove all update locks from all enabled VCS fileserver backends, but this
can be narrowed by using the following arguments:
backend
Only clear the update lock for the specified backend(s).
remote
If not None, then any remotes which contain the passed string will have
their lock cleared. For example, a ``remote`` value of **github** will
remove the lock from all github.com remotes.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_lock
salt-run fileserver.clear_lock backend=git,hg
salt-run fileserver.clear_lock backend=git remote=github
salt-run fileserver.clear_lock remote=bitbucket
'''
fileserver = salt.fileserver.Fileserver(__opts__)
cleared, errors = fileserver.clear_lock(back=backend, remote=remote)
ret = {}
if cleared:
ret['cleared'] = cleared
if errors:
ret['errors'] = errors
if not ret:
ret = 'No locks were removed'
salt.output.display_output(ret, 'nested', opts=__opts__)
def lock(backend=None, remote=None):
'''
.. versionadded:: 2015.5.0
Set a fileserver update lock for VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`).
.. note::
This will only operate on enabled backends (those configured in
:conf_master:`fileserver_backend`).
backend
Only set the update lock for the specified backend(s).
remote
        If not None, then any remotes which contain the passed string will have
        their lock set. For example, a ``remote`` value of ``*github.com*``
        will set the lock on all github.com remotes.
CLI Example:
.. code-block:: bash
salt-run fileserver.lock
salt-run fileserver.lock backend=git,hg
salt-run fileserver.lock backend=git remote='*github.com*'
salt-run fileserver.lock remote=bitbucket
'''
fileserver = salt.fileserver.Fileserver(__opts__)
locked, errors = fileserver.lock(back=backend, remote=remote)
ret = {}
if locked:
ret['locked'] = locked
if errors:
ret['errors'] = errors
if not ret:
ret = 'No locks were set'
salt.output.display_output(ret, 'nested', opts=__opts__)
|
py | 7dfe5767aed6ad1929af28698d4a94bce1d80ef9 | #Copyright Daniel Dunn 2018
#This file is part of
#ShowMessage is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, version 3.
#ShowMessage is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with ShowMessage. If not, see <http://www.gnu.org/licenses/>.
import weakref, types, collections, struct, time, socket,threading,random,os,logging
showmessage_logger = logging.getLogger("ShowMessage")
def universal_weakref(f):
"Create a weakref to an object that works even if the object is a bound method"
if isinstance(f,types.MethodType):
if hasattr(weakref,"WeakMethod"):
return weakref.WeakMethod(f)
else:
raise ValueError("Your version of python does not support weak refs to bound methods, upgrade to 3.4+")
else:
return weakref.ref(f)
import collections
ShowMessage = collections.namedtuple("ShowMessage",['counter', 'opcode','data'])
ShowMessage_message = collections.namedtuple("ShowMessage",['target', 'opcode','name','data','mid'])
def showMessage_message(target,name, data,counter,reliable=True):
"Encodes a showmessage message(as opposed to a showmessage rpc call or the like)"
return(ShowMessage(counter, 1 if reliable else 3, target.encode('utf-8')+b'\n'+name.encode('utf-8')+b'\n'+data))
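# NOTE: two parseShowMessage definitions follow; the second (counter/opcode/data form)
# overrides the first, more elaborate one, which is therefore effectively dead code.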
def parseShowMessage(m,raw=False):
if raw:
        if m.startswith(b'ShowMessage\x00'):
m = m[len(b'ShowMessage\x00'):]
else:
raise RuntimeError("No header")
mid = struct.unpack("<Q",m[:8])[0]
ts = struct.unpack("<Q",m[8:16])[0]
opcode =m[16]
if opcode in [1,2]:
s = m[17:].split(b"\n",2)
return ShowMessage(s[0].decode('utf-8'),opcode,s[1].decode('utf-8'),s[2],mid, ts/1000000.0)
def parseShowMessage(m):
if m.startswith(b'ShowMessage\x00'):
m = m[len(b'ShowMessage\x00'):]
counter = struct.unpack("<Q",m[:8])[0]
opcode =m[8]
data=m[9:]
return ShowMessage(counter, opcode, data)
def makeShowMessage(counter, opcode,data):
m = (b'ShowMessage\x00'+struct.pack("<Q", counter )+struct.pack("<B", opcode)+data)
return m
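# Illustrative self-check (a sketch added for clarity, not part of the original module):
# round-trip one frame through makeShowMessage and the final parseShowMessage definition.
if __name__ == "__main__":
    _frame = makeShowMessage(counter=1, opcode=1, data=b"hello")
    _parsed = parseShowMessage(_frame)
    assert _parsed.counter == 1 and _parsed.data == b"hello"
    showmessage_logger.debug("ShowMessage round-trip OK: %r", _parsed)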
|
py | 7dfe57c44a361904ffd5073a0b6449ea9355ec97 | # coding: utf-8
import pprint
import re
import six
class ListAuditlogsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'start_time': 'str',
'end_time': 'str',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'start_time': 'start_time',
'end_time': 'end_time',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, x_language=None, instance_id=None, start_time=None, end_time=None, offset=None, limit=None):
"""ListAuditlogsRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._instance_id = None
self._start_time = None
self._end_time = None
self._offset = None
self._limit = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
self.start_time = start_time
self.end_time = end_time
self.offset = offset
self.limit = limit
@property
def x_language(self):
"""Gets the x_language of this ListAuditlogsRequest.
语言
:return: The x_language of this ListAuditlogsRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListAuditlogsRequest.
语言
:param x_language: The x_language of this ListAuditlogsRequest.
:type: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListAuditlogsRequest.
实例ID。
:return: The instance_id of this ListAuditlogsRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListAuditlogsRequest.
实例ID。
:param instance_id: The instance_id of this ListAuditlogsRequest.
:type: str
"""
self._instance_id = instance_id
@property
def start_time(self):
"""Gets the start_time of this ListAuditlogsRequest.
查询开始时间,格式为“yyyy-mm-ddThh:mm:ssZ”。 其中,T指某个时间的开始;Z指时区偏移量,例如北京时间偏移显示为+0800。
:return: The start_time of this ListAuditlogsRequest.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ListAuditlogsRequest.
查询开始时间,格式为“yyyy-mm-ddThh:mm:ssZ”。 其中,T指某个时间的开始;Z指时区偏移量,例如北京时间偏移显示为+0800。
:param start_time: The start_time of this ListAuditlogsRequest.
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ListAuditlogsRequest.
查询结束时间,格式为“yyyy-mm-ddThh:mm:ssZ”,且大于查询开始时间,时间跨度不超过30天。 其中,T指某个时间的开始,Z指时区偏移量,例如北京时间偏移显示为+0800。
:return: The end_time of this ListAuditlogsRequest.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListAuditlogsRequest.
查询结束时间,格式为“yyyy-mm-ddThh:mm:ssZ”,且大于查询开始时间,时间跨度不超过30天。 其中,T指某个时间的开始,Z指时区偏移量,例如北京时间偏移显示为+0800。
:param end_time: The end_time of this ListAuditlogsRequest.
:type: str
"""
self._end_time = end_time
@property
def offset(self):
"""Gets the offset of this ListAuditlogsRequest.
索引位置,偏移量。 从第一条数据偏移offset条数据后开始查询,默认为0(偏移0条数据,表示从第一条数据开始查询),必须为数字,不能为负数。
:return: The offset of this ListAuditlogsRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListAuditlogsRequest.
索引位置,偏移量。 从第一条数据偏移offset条数据后开始查询,默认为0(偏移0条数据,表示从第一条数据开始查询),必须为数字,不能为负数。
:param offset: The offset of this ListAuditlogsRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListAuditlogsRequest.
查询记录数。取值范围[1, 50]。
:return: The limit of this ListAuditlogsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListAuditlogsRequest.
查询记录数。取值范围[1, 50]。
:param limit: The limit of this ListAuditlogsRequest.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListAuditlogsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
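# Illustrative usage (a sketch, not part of the generated SDK; the values are placeholders):
#     request = ListAuditlogsRequest(
#         instance_id="your-instance-id",
#         start_time="2021-01-01T00:00:00+0800",
#         end_time="2021-01-02T00:00:00+0800",
#         offset=0,
#         limit=10,
#     )
#     print(request.to_str())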
|
py | 7dfe588bbd03d46402aa86c93bf633bc4c93966a | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest2 as unittest
import os
import shutil
from tinctest.lib import local_path, Gpdiff
from mpp.models import SQLTestCase
@unittest.skip('mock')
class MockSQLTestCase(SQLTestCase):
def test_foo(self):
pass
@unittest.skip('mock')
class MockSQLTestCaseWithGucs(SQLTestCase):
'''
@gucs gp_optimizer=on;gp_log_optimizer=on
'''
class SQLTestCaseTests(unittest.TestCase):
test_case = MockSQLTestCase('test_foo')
def setUp(self):
if os.path.exists(SQLTestCaseTests.test_case.get_out_dir()):
shutil.rmtree(SQLTestCaseTests.test_case.get_out_dir())
def test_pos(self):
test_case = MockSQLTestCase('test_test_pos')
test_case.run_test()
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'test_pos.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'test_pos.out')))
def test_neg(self):
test_case = MockSQLTestCase('test_test_neg')
with self.assertRaises(AssertionError) as cm:
result = test_case.run_test()
def test_with_gucs(self):
test_case = MockSQLTestCaseWithGucs('test_test_pos')
test_case.run_test()
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'test_pos.sql')))
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'test_pos.out')))
self.assertTrue(Gpdiff.are_files_equal(
os.path.join(test_case.get_out_dir(), 'test_pos.sql'),
local_path('gucs/test_pos_gucs.sql')))
def test_file_with_gucs(self):
test_case = MockSQLTestCase('test_test_file_with_gucs')
result = test_case.run_test()
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'test_file_with_gucs.sql')))
        self._check_gucs_exist_in_file(os.path.join(test_case.get_out_dir(), 'test_file_with_gucs.sql'), test_case.gucs)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(), 'test_file_with_gucs.out')))
self.assertTrue(Gpdiff.are_files_equal(
os.path.join(test_case.get_out_dir(), 'test_file_with_gucs.sql'),
local_path('gucs/test_file_with_gucs.sql')))
def _check_gucs_exist_in_file(self, sql_file, gucs):
with open(sql_file, 'r') as f:
for guc in gucs:
guc_exists = False
for line in f:
if guc in line:
guc_exists = True
break
self.assertTrue(guc_exists)
def test_gather_minidump(self):
test_case = MockSQLTestCase('test_test_md')
test_case.gather_mini_dump(test_case.sql_file)
self.assertTrue(os.path.exists(os.path.join(test_case.get_out_dir(),
os.path.basename(test_case.sql_file).replace('.sql', '_minidump.mdp'))))
|
py | 7dfe58bb0bb8475b00eb81650db4547987d77438 | import altair as alt
import pandas as pd
from sys import argv
import sys
import numpy as np
df = pd.read_csv(argv[1])
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.fillna(0.0, inplace=True)
# MAGMA cut-off
def sigfunc(x): return x >= 5.60206
sequence = [str(x) for x in range(1, 23)]
sequence.append('X')
sequence.append('Y')
def label_filterchr(row):
if(row in sequence):
return True
else:
return False
df['MAGMA association'] = df['association']
df['magma_sig'] = df['association'].apply(sigfunc)
df['CHR'] = df['chromosome'].apply(
lambda x: 23 if x == 'X' else (
24 if x == 'Y' else int(x)))
df.columns = df.columns.str.replace('-', '_')
df.columns = df.columns.str.replace('target', 'gene')
df.columns = df.columns.str.replace('_basal_ganglia', '')
df.columns = df.columns.str.replace('c_1', 'C1')
df['urlgene'] = "https://phenviz.navigome.com/gene_phenotypes/" + \
df['ENSEMBL'].astype(str) + '.html'
selectionsig = alt.selection_multi(fields=['predixcan_sig'], empty='all')
selectionsig2 = alt.selection_multi(fields=['magma_sig'], empty='all')
colorsig = alt.condition(selectionsig,
alt.value('black'),
alt.value('lightgray'))
colorsig2 = alt.condition(selectionsig2,
alt.value('black'),
alt.value('lightgray'))
adipose = ['Adipose_Subcutaneous',
'Adipose_Visceral_Omentum',
'Breast_Mammary_Tissue']
muscular = ['Muscle_Skeletal']
skin = ['Skin_Not_Sun_Exposed_Suprapubic',
'Skin_Sun_Exposed_Lower_leg']
cell_lines = ['Cells_Transformed_fibroblasts',
'Cells_EBV_transformed_lymphocytes']
circulatory = ['DGN_Whole_Blood',
'Whole_Blood',
'Artery_Aorta',
'Artery_Coronary',
'Artery_Tibial',
'Heart_Atrial_Appendage',
'Heart_Left_Ventricle',
'Spleen']
nervous = ['Brain_Amygdala',
'Brain_Anterior_cingulate_cortex_BA24',
'Brain_Caudate',
'Brain_Nucleus_accumbens',
'Brain_Putamen',
'Brain_Cerebellar_Hemisphere',
'Brain_Cerebellum',
'Brain_Cortex',
'Brain_Frontal_Cortex_BA9',
'Brain_Hippocampus',
'Brain_Hypothalamus',
'Brain_Spinal_cord_cervical_C1',
'Brain_Substantia_nigra',
'Nerve_Tibial']
digestive = [
'Colon_Sigmoid',
'Colon_Transverse',
'Esophagus_Gastroesophageal_Junction',
'Esophagus_Mucosa',
'Esophagus_Muscularis',
'Liver',
'Minor_Salivary_Gland',
'Small_Intestine_Terminal_Ileum',
'Stomach'
]
endocrine = [
'Adrenal_Gland',
'Brain_Hypothalamus',
'Pituitary',
'Pancreas',
'Thyroid',
'Ovary',
'Testis',
]
genitourinary = [
'Prostate',
'Testis',
'Ovary',
'Uterus',
'Vagina'
]
lung = ['Lung']
mycolall = ['Adipose_Subcutaneous',
'Adipose_Visceral_Omentum',
'Breast_Mammary_Tissue',
'Muscle_Skeletal',
'DGN_Whole_Blood',
'Whole_Blood',
'Artery_Aorta',
'Artery_Coronary',
'Artery_Tibial',
'Heart_Atrial_Appendage',
'Heart_Left_Ventricle',
'Spleen',
'Brain_Amygdala',
'Brain_Anterior_cingulate_cortex_BA24',
'Brain_Caudate',
'Brain_Nucleus_accumbens',
'Brain_Putamen',
'Brain_Cerebellar_Hemisphere',
'Brain_Cerebellum',
'Brain_Cortex',
'Brain_Frontal_Cortex_BA9',
'Brain_Hippocampus',
'Brain_Hypothalamus',
'Brain_Spinal_cord_cervical_C1',
'Brain_Substantia_nigra',
'Nerve_Tibial',
'Cells_Transformed_fibroblasts',
'Cells_EBV_transformed_lymphocytes',
'Colon_Sigmoid',
'Colon_Transverse',
'Esophagus_Gastroesophageal_Junction',
'Esophagus_Mucosa',
'Esophagus_Muscularis',
'Liver',
'Minor_Salivary_Gland',
'Small_Intestine_Terminal_Ileum',
'Stomach',
'Adrenal_Gland',
'Pituitary',
'Pancreas',
'Thyroid',
'Lung',
'Ovary',
'Prostate',
'Testis',
'Uterus',
'Vagina',
'Skin_Not_Sun_Exposed_Suprapubic',
'Skin_Sun_Exposed_Lower_leg']
sigArray = ['1: gene and tissue analyses',
'2: tissue analyses only',
'3: gene analysis only',
'4: not significant']
if not df['association'].isnull().values.all():
maxpred = df['association'].values.max()
else:
maxpred = 5
# S-PrediXcan cut-off
def label_filter(row):
result = False
for i in mycolall:
if abs(row[i]) >= 4.70813:
result = True
return result
def label_filter_any(row):
result = False
for i in mycolall:
if abs(row[i]) >= 4.70813:
result = True
break
if row['association'] >= 5.60206:
result = True
return result
df['any_filter'] = df.apply(lambda row: label_filter_any(row), axis=1)
print(df.count())
df = df.loc[df['any_filter']]
print(df.count())
if df.isnull().values.all():
chartfinal = alt.Chart(df).mark_point().encode(
x=alt.X(
'name', axis=alt.Axis(
title='')), ).properties(
title="There is no significant gene for this phenotype (yet).")
chartfinal.save(argv[3] + '.html')
sys.exit()
df['predixcan_sig'] = df.apply(lambda row: label_filter(row), axis=1)
def label_sig(row):
if row['predixcan_sig'] and row['magma_sig']:
return sigArray[0]
elif row['predixcan_sig']:
return sigArray[1]
elif row['magma_sig']:
return sigArray[2]
else:
return sigArray[3]
sigArray2 = ['significant', 'not significant']
def label_sig2(row):
if row['predixcan_sig'] or row['magma_sig']:
return sigArray2[0]
else:
return sigArray2[1]
df['significant'] = df.apply(lambda row: label_sig2(row), axis=1)
df['significant in tissue'] = df.apply(lambda row: label_sig(row), axis=1)
selection = alt.selection_multi(fields=['chromosome'], empty='all')
color = alt.condition(selection,
alt.Color('chromosome:N', legend=None),
alt.value('lightgray'))
legendsig = alt.Chart().mark_square().encode(
y=alt.Y(
'predixcan_sig:N',
axis=alt.Axis(
orient='left',
title="S-PrediXcan sig.")),
size=alt.value(100),
color=colorsig).add_selection(selectionsig)
legendsig2 = alt.Chart().mark_square().encode(
y=alt.Y('magma_sig:N', axis=alt.Axis(orient='left', title="MAGMA sig.")),
size=alt.value(100),
color=colorsig2
).add_selection(
selectionsig2
)
legendfilter = alt.vconcat(legendsig, legendsig2)
chart = alt.Chart().mark_point(
opacity=0.8,
stroke='black',
strokeWidth=0.5,
size=3,
filled=True
).encode(
shape=alt.Shape(
'significant in tissue:N',
legend=None,
scale=alt.Scale(
domain=sigArray)),
x=alt.X('median:Q', title="Gene location (base pairs)"),
size=alt.Size(
'significant:N',
scale=alt.Scale(
domain=sigArray2,
range=[
100,
40]),
legend=None),
color=alt.Color('association:Q',
scale=alt.Scale(scheme='viridis', domain=[0, maxpred]),
legend=alt.Legend(title='MAGMA -log10(p)',
orient='right')
),
tooltip=['ENSEMBL', 'name', 'size', 'MAGMA association'],
href=alt.Href('urlgene')
).properties(
width=650,
height=20
).transform_filter(selection).transform_filter(
selectionsig
).transform_filter(
selectionsig2
)
therule = alt.Chart().mark_rule(strokeWidth=15, opacity=0.4).encode(
x=alt.X("begin:Q"),
x2=alt.X2("end:Q"),
tooltip=['ENSEMBL', 'name', 'size', 'MAGMA association'],
color=alt.Color('association'),
href=alt.Href('urlgene')
).transform_filter(selection)
legend = alt.Chart().mark_rect().encode(
x=alt.X('chromosome:N',
axis=alt.Axis(title="Choose a chromosome " +
"(click one of the coloured squares), " +
"then zoom in using trackpad or mouse wheel",
orient='top'), scale=alt.Scale(domain=sequence)),
color=color
).properties(width=650).add_selection(
selection
)
annotation = alt.Chart().mark_text(
align='center',
baseline='middle',
fontSize=7,
color='black',
dy=-6
).encode(
x=alt.X('median:Q', title="Gene location (base pairs)"),
text='name',
href=alt.Href('urlgene')
).transform_filter(selection).transform_filter(
((alt.datum.significant == 'significant'))
).transform_filter(
selectionsig
).transform_filter(
selectionsig2
)
chartfinal = chart + therule + annotation
infine = chartfinal.facet(row='CHR').interactive()
infine.spacing = -32
infine = infine.transform_filter(
selection
)
infine2 = alt.vconcat(legend, infine)
infine2 = alt.hconcat(legendfilter, infine2, data=df).properties(
title='Gene associations: ' + str(argv[2])
).configure_title(
offset=30
)
infine2.save(argv[3] + '.html')
|
py | 7dfe58c6ab1f0644165490ff1451aefbc2c35cc8 | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.71
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_client.exceptions import ApiAttributeError
class GetProjectAccessResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
}
@cached_property
def discriminator():
return None
attribute_map = {
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GetProjectAccessResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GetProjectAccessResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 7dfe58d001a05b0d3f80e870dbd696e181fac453 | # -*- coding: utf-8 -*-
import socket
import serial
import serial.tools.list_ports
import threading
import sys
import os
from time import sleep
from requests import Session, exceptions
from PyQt5.QtWidgets import (
QApplication, QMainWindow, QMessageBox
)
from PyQt5 import QtGui
from main_window_ui import Ui_oMainWind
# def resource_path(relative_path):
# base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
# return os.path.join(base_path, relative_path)
class Window(QMainWindow, Ui_oMainWind):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.sys_ports = []
self.ui = Ui_oMainWind()
self.ui.setupUi(self)
self.connectSignalsSlots()
self.bStartStopFlag = False
self.oSocketHolder = None
self.oSerialHolder = None
self.oConnectHolder = None
self.oThreadHolderRx = None
self.oThreadHolderTx = None
def connectSignalsSlots(self):
self.ui.oActInfo.triggered.connect(self.about)
self.ui.oActExit.triggered.connect(self.close)
self.ui.oEntryIp0.setText('192')
self.ui.oEntryIp1.setText('168')
self.ui.oEntryIp2.setText('16')
self.ui.oEntryIp3.setText('123')
self.ui.oEntryPort.setText('5000')
# self.ui.oLbStatus.setPixmap(QtGui.QPixmap(resource_path("red.png")))
# self.ui.oLbLargeIcon.setPixmap(QtGui.QPixmap(resource_path("Qorvo_Logo.png")))
# icon = QtGui.QIcon()
# icon.addPixmap(QtGui.QPixmap(resource_path("qorvo_ico.ico")), QtGui.QIcon.Normal, QtGui.QIcon.On)
# icon.addPixmap(QtGui.QPixmap(":/qorvo_ico.ico"), QtGui.QIcon.Normal, QtGui.QIcon.On)
# self.setWindowIcon(icon)
self.ui.oEntryBaud.setText('57600')
self.ui.oEntryDataBits.setText('8')
self.ui.oEntryParityBits.setText('N')
self.ui.oEntryStopBits.setText('1')
self.updateComList()
self.ui.oButStartStop.clicked.connect(self.startStopBind)
def forceStop(self):
l_label = ['Stop', 'Start']
self.bStartStopFlag = False
self.ui.oButStartStop.setText(l_label[int(not self.bStartStopFlag)])
self.ui.oListBoxCom.setDisabled(False)
self.ui.oEntryIp0.setDisabled(False)
self.ui.oEntryIp1.setDisabled(False)
self.ui.oEntryIp2.setDisabled(False)
self.ui.oEntryIp3.setDisabled(False)
self.ui.oEntryPort.setDisabled(False)
self.ui.oEntryBaud.setDisabled(False)
self.ui.oEntryDataBits.setDisabled(False)
self.ui.oEntryParityBits.setDisabled(False)
self.ui.oEntryStopBits.setDisabled(False)
self.closeAll()
def startStopBind(self):
l_label = ['Stop', 'Start']
self.bStartStopFlag = not self.bStartStopFlag
print('The start flag: {}'.format(self.bStartStopFlag))
self.ui.oButStartStop.setText(l_label[int(not self.bStartStopFlag)])
if not self.bStartStopFlag:
self.ui.oListBoxCom.setDisabled(False)
self.ui.oEntryIp0.setDisabled(False)
self.ui.oEntryIp1.setDisabled(False)
self.ui.oEntryIp2.setDisabled(False)
self.ui.oEntryIp3.setDisabled(False)
self.ui.oEntryPort.setDisabled(False)
self.ui.oEntryBaud.setDisabled(False)
self.ui.oEntryDataBits.setDisabled(False)
self.ui.oEntryParityBits.setDisabled(False)
self.ui.oEntryStopBits.setDisabled(False)
# self.ui.oLbStatus.setPixmap(QtGui.QPixmap(resource_path("red.png")))
self.ui.oLbStatus.setPixmap(QtGui.QPixmap(":/red.png"))
self.closeAll()
else:
self.ui.oListBoxCom.setDisabled(True)
self.ui.oEntryIp0.setDisabled(True)
self.ui.oEntryIp1.setDisabled(True)
self.ui.oEntryIp2.setDisabled(True)
self.ui.oEntryIp3.setDisabled(True)
self.ui.oEntryPort.setDisabled(True)
self.ui.oEntryBaud.setDisabled(True)
self.ui.oEntryDataBits.setDisabled(True)
self.ui.oEntryParityBits.setDisabled(True)
self.ui.oEntryStopBits.setDisabled(True)
# self.ui.oLbStatus.setPixmap(QtGui.QPixmap(resource_path("green.png")))
self.ui.oLbStatus.setPixmap(QtGui.QPixmap(":/green.png"))
self.startTcpIpCom()
def startTcpIpCom(self):
s_website = r"http://{}.{}.{}.{}".format(
self.ui.oEntryIp0.text(),
self.ui.oEntryIp1.text(),
self.ui.oEntryIp2.text(),
self.ui.oEntryIp3.text())
print('Site address: {}'.format(s_website))
# setup module
with Session() as o_session:
try:
o_site = o_session.get(r"{}/login".format(s_website), timeout=2)
# login
o_state = o_session.post(r"{}/state".format(s_website), data={'__PPAS': 'admin'})
# print(o_state.content.decode('gb2312'))
except exceptions.Timeout:
# cannot reach
self.startStopBind()
return
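        # Discover this machine's outward-facing IPv4 address by "connecting" a UDP
        # socket toward a public resolver; UDP connect() sends no actual traffic.
        # The local interface address chosen for that route is reused below.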
o_soc_holder = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
o_soc_holder.connect(("8.8.8.8", 80))
s_my_ip = o_soc_holder.getsockname()[0]
o_soc_holder.shutdown(socket.SHUT_RDWR)
o_soc_holder.close()
self.oSocketHolder = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = (s_my_ip, 60000)
print(sys.stderr, 'starting up on %s port %s' % server_address)
self.oSocketHolder.bind(server_address)
# Listen for incoming connections
self.oSocketHolder.listen(1)
self.oSerialHolder = serial.Serial(self.ui.oListBoxCom.currentText(), self.ui.oEntryBaud.text(), timeout=5)
self.oSerialHolder.close()
self.oSerialHolder.open()
self.oConnectHolder, addr = self.oSocketHolder.accept()
# thread has to start before other loop
self.oThreadHolderRx = threading.Thread(target=self.recv_msg)
self.oThreadHolderRx.start()
self.oThreadHolderTx = threading.Thread(target=self.send_msg)
self.oThreadHolderTx.start()
def recv_msg(self):
while self.bStartStopFlag:
try:
recv_msg = self.oConnectHolder.recv(1024)
if not recv_msg:
print('Error occur!')
return
self.oSerialHolder.write(recv_msg)
except socket.error, exc:
print('Socket Error {}'.format(exc))
self.forceStop()
return
def send_msg(self):
while self.bStartStopFlag:
try:
send_msg = self.oSerialHolder.read_all()
send_msg = send_msg.encode()
self.oConnectHolder.send(send_msg)
except socket.error, exc:
print('Socket Error {}'.format(exc))
self.forceStop()
return
def closeAll(self):
self.oConnectHolder.shutdown(socket.SHUT_RDWR)
sleep(0.5)
print self.oThreadHolderRx.isAlive()
print self.oThreadHolderTx.isAlive()
self.oSerialHolder.close()
def updateComList(self):
self.ui.oListBoxCom.clear()
l_ports = serial.tools.list_ports.comports()
connected = [element.device for element in l_ports]
self.ui.oListBoxCom.addItems(connected)
def about(self):
o_msg_box = QMessageBox()
o_msg_box.setWindowTitle("TCP/IP Serial Binding Tool")
o_msg_box.setText("<p>Designer: Brfo</p>"
"<p>Contact: [email protected]</p>"
"<p>Date: 2021</p>")
o_msg_box.exec_()
if __name__ == "__main__":
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec_())
|
py | 7dfe59003d72bef58c36c3007d1b50b6f9892465 | import requests
import json
def quote_of_the_day():
try:
url = 'https://quotes.rest/qod/'
response = requests.get(url)
response_json = json.loads(response.text)
try: # Only 10 requests allowed per hour
quote = response_json[u'contents'][u'quotes'][0][u'quote']
quote_author = response_json[u'contents'][u'quotes'][0][u'author']
except KeyError:
quote = response_json[u'error'][u'code']
quote_author = response_json[u'error'][u'message']
quote_of_day = str(quote) + ' - ' + str(quote_author)
return quote_of_day
except Exception as e:
        # TODO: handle specific exceptions properly instead of a bare Exception
print(e)
return "Error Beep Boop" |
py | 7dfe59b5b2d8b4c10d96c97410e613e79a2089b9 | """
ttgo_hello.py
Writes "Hello!" in random colors at random locations on a
LILYGO® TTGO T-Display.
https://www.youtube.com/watch?v=atBa0BYPAAc
"""
import random
from machine import Pin, SPI
import st7789py as st7789
# Choose a font
# from fonts import vga1_8x8 as font
# from fonts import vga2_8x8 as font
# from fonts import vga1_8x16 as font
# from fonts import vga2_8x16 as font
# from fonts import wyse1_16x16 as font
# from fonts import wyse1_bold_16x16 as font
# from fonts import wyse2_16x16 as font
# from fonts import wyse2_bold_16x16 as font
# from fonts import wyse1_16x32 as font
# from fonts import wyse1_bold_16x32 as font
# from fonts import wyse2_16x32 as font
from fonts import wyse2_bold_16x32 as font
def main():
backlight = Pin(4, Pin.OUT)
backlight.value(1)
tft = st7789.ST7789(
SPI(2, baudrate=30000000, polarity=1, phase=1, sck=Pin(18), mosi=Pin(19)),
135,
240,
reset=Pin(23, Pin.OUT),
cs=Pin(5, Pin.OUT),
dc=Pin(16, Pin.OUT),
backlight=backlight,
rotation=0)
while True:
for rotation in range(4):
tft.rotation(rotation)
tft.fill(0)
col_max = tft.width - font.WIDTH*6
row_max = tft.height - font.HEIGHT
for _ in range(100):
tft.text(
font,
"Hello!",
random.randint(0, col_max),
random.randint(0, row_max),
st7789.color565(
random.getrandbits(8),
random.getrandbits(8),
random.getrandbits(8)),
st7789.color565(
random.getrandbits(8),
random.getrandbits(8),
random.getrandbits(8))
)
main()
|
py | 7dfe5b24faf20ca30146e27658a6005424e2aa08 | from fastapi import Query
from typing import List, Optional
from pydantic.main import BaseModel
class BtcNetwork(BaseModel):
name: str = Query(..., description="Which network is in use (ipv4, ipv6 or onion)")
limited: bool = Query(..., description="Is the network limited using - onlynet?")
reachable: bool = Query(..., description="Is the network reachable?")
proxy: Optional[str] = Query(
"",
description="host:port of the proxy that is used for this network, or empty if none",
)
proxy_randomize_credentials: bool = Query(
..., description="Whether randomized credentials are used"
)
@classmethod
def from_rpc(cls, r):
return cls(
name=r["name"],
limited=r["limited"],
reachable=r["reachable"],
proxy=r["proxy"],
proxy_randomize_credentials=r["proxy_randomize_credentials"],
)
class BtcLocalAddress(BaseModel):
address: str = Query(..., description="Network address")
port: int = Query(..., description="Network port")
score: int = Query(..., description="Relative score")
@classmethod
def from_rpc(cls, local_address):
return BtcLocalAddress(
address=local_address["address"],
port=local_address["port"],
score=local_address["score"],
)
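# Illustrative only: from_rpc maps one entry of bitcoind's getnetworkinfo()["localaddresses"]
# into this model, e.g.
#     BtcLocalAddress.from_rpc({"address": "203.0.113.5", "port": 8333, "score": 1})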
# getnetworkinfo
class NetworkInfo(BaseModel):
version: int = Query(..., description="The bitcoin core server version")
subversion: str = Query(..., description="The server subversion string")
protocol_version: int = Query(..., description="The protocol version")
local_services: str = Query(
None, description="The services we offer to the network, hex formatted"
)
local_services_names: List[str] = Query(
[], description="The services we offer to the network, in human-readable form"
)
local_relay: bool = Query(
..., description="True if transaction relay is requested from peers"
)
time_offset: int = Query(..., description="The time offset")
connections: int = Query(..., description="The total number of connections")
connections_in: int = Query(..., description="The number of inbound connections")
connections_out: int = Query(..., description="The number of outbound connections")
network_active: bool = Query(..., description="Whether p2p networking is enabled")
networks: List[BtcNetwork] = Query(..., description="Information per network")
relay_fee: int = Query(
..., description="Minimum relay fee for transactions in BTC/kB"
)
incremental_fee: int = Query(
...,
description="Minimum fee increment for mempool limiting or BIP 125 replacement in BTC/kB",
)
local_addresses: List[BtcLocalAddress] = Query(
[], description="List of local addresses"
)
warnings: str = Query(None, description="Any network and blockchain warnings")
@classmethod
def from_rpc(cls, r):
networks = []
for n in r["networks"]:
networks.append(BtcNetwork.from_rpc(n))
return cls(
version=r["version"],
subversion=r["subversion"],
protocol_version=r["protocolversion"],
local_services=r["localservices"],
local_services_names=[name for name in r["localservicesnames"]],
local_relay=r["localrelay"],
time_offset=r["timeoffset"],
connections=r["connections"],
connections_in=r["connections_in"],
connections_out=r["connections_out"],
network_active=r["networkactive"],
networks=[BtcNetwork.from_rpc(n) for n in r["networks"]],
relay_fee=r["relayfee"],
incremental_fee=r["incrementalfee"],
local_addresses=[BtcLocalAddress.from_rpc(n) for n in r["localaddresses"]],
warnings=r["warnings"],
)
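# Illustrative only: NetworkInfo.from_rpc expects the raw dict returned by bitcoind's
# `getnetworkinfo` RPC, e.g. NetworkInfo.from_rpc(rpc.getnetworkinfo()) where `rpc` is
# whatever JSON-RPC client the surrounding application uses (hypothetical name here).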
class Bip9Statistics(BaseModel):
period: int = Query(
..., description="The length in blocks of the BIP9 signalling period"
)
threshold: int = Query(
...,
description="The number of blocks with the version bit set required to activate the feature",
)
elapsed: int = Query(
...,
description="The number of blocks elapsed since the beginning of the current period",
)
count: int = Query(
...,
description="The number of blocks with the version bit set in the current period",
)
possible: bool = Query(
...,
description="False if there are not enough blocks left in this period to pass activation threshold",
)
@classmethod
def from_rpc(cls, r):
return cls(
period=r["period"],
threshold=r["threshold"],
elapsed=r["elapsed"],
count=r["count"],
possible=r["possible"],
)
class Bip9Data(BaseModel):
status: str = Query(
...,
description="""One of "defined", "started", "locked_in", "active", "failed" """,
)
bit: int = Query(
None,
description="the bit(0-28) in the block version field used to signal this softfork(only for `started` status)",
)
start_time: int = Query(
...,
description="The minimum median time past of a block at which the bit gains its meaning",
)
timeout: int = Query(
...,
description="The median time past of a block at which the deployment is considered failed if not yet locked in",
)
since: int = Query(
..., description="Height of the first block to which the status applies"
)
min_activation_height: int = Query(
..., description="Minimum height of blocks for which the rules may be enforced"
)
    statistics: Bip9Statistics = Query(
        None,
        description="Numeric statistics about BIP9 signalling for a softfork (only for `started` status)",
    )
    height: int = Query(
        None,
        description="Height of the first block which the rules are or will be enforced (only for `buried` type, or `bip9` type with `active` status)",
    )
active: bool = Query(
None,
description="True if the rules are enforced for the mempool and the next block",
)
@classmethod
def from_rpc(cls, r):
return cls(
status=r["status"],
bit=r["bit"] if "bit" in r else None,
start_time=r["start_time"],
timeout=r["timeout"],
since=r["since"],
min_activation_height=r["min_activation_height"],
statistics=Bip9Statistics.from_rpc(r["statistics"])
if "statistics" in r
else None,
height=r["height"] if "height" in r else None,
active=r["active"] if "active" in r else None,
)
class SoftFork(BaseModel):
name: str = Query(..., description="Name of the softfork")
type: str = Query(..., description='One of "buried", "bip9"')
    active: bool = Query(
        ...,
        description="True if the rules are enforced for the mempool and the next block",
    )
    bip9: Bip9Data = Query(
        None, description='Status of bip9 softforks (only for "bip9" type)'
    )
height: int = Query(
None,
description="Height of the first block which the rules are or will be enforced (only for `buried` type, or `bip9` type with `active` status)",
)
@classmethod
def from_rpc(cls, name: str, r: dict):
return cls(
name=name,
type=r["type"],
active=r["active"],
bip9=Bip9Data.from_rpc(r["bip9"]) if "bip9" in r else None,
height=r["height"] if "height" in r else None,
)
class BlockchainInfo(BaseModel):
    chain: str = Query(..., description="Current network name (main, test, regtest)")
blocks: int = Query(
...,
description="The height of the most-work fully-validated chain. The genesis block has height 0",
)
headers: int = Query(
..., description="The current number of headers we have validated"
)
best_block_hash: str = Query(
..., description="The hash of the currently best block"
)
difficulty: int = Query(..., description="The current difficulty")
mediantime: int = Query(..., description="Median time for the current best block")
    verification_progress: float = Query(
        ..., description="Estimate of verification progress [0..1]"
    )
initial_block_download: bool = Query(
...,
description="Estimate of whether this node is in Initial Block Download mode",
)
    chainwork: str = Query(
        ..., description="Total amount of work in the active chain, in hexadecimal"
    )
size_on_disk: int = Query(
..., description="The estimated size of the block and undo files on disk"
)
pruned: bool = Query(..., description="If the blocks are subject to pruning")
    prune_height: int = Query(
        None,
        description="Lowest-height complete block stored (only present if pruning is enabled)",
    )
    automatic_pruning: bool = Query(
        None,
        description="Whether automatic pruning is enabled (only present if pruning is enabled)",
    )
    prune_target_size: int = Query(
        None,
        description="The target size used by pruning (only present if automatic pruning is enabled)",
    )
warnings: str = Query(..., description="Any network and blockchain warnings")
softforks: List[SoftFork] = Query(..., description="Status of softforks")
@classmethod
def from_rpc(cls, r):
softforks = []
for name in r["softforks"]:
softforks.append(SoftFork.from_rpc(name, r["softforks"][name]))
return cls(
chain=r["chain"],
blocks=r["blocks"],
headers=r["headers"],
best_block_hash=r["bestblockhash"],
difficulty=r["difficulty"],
mediantime=r["mediantime"],
verification_progress=r["verificationprogress"],
initial_block_download=r["initialblockdownload"],
chainwork=r["chainwork"],
size_on_disk=r["size_on_disk"],
pruned=r["pruned"],
            prune_height=r.get("pruneheight"),
            automatic_pruning=r.get("automatic_pruning"),
            prune_target_size=r.get("prune_target_size"),
warnings=r["warnings"],
softforks=softforks,
)
class BtcInfo(BaseModel):
# Info regarding bitcoind
blocks: int = Query(
...,
description="The height of the most-work fully-validated chain. The genesis block has height 0",
)
headers: int = Query(
..., description="The current number of headers we have validated"
)
    verification_progress: float = Query(
        ..., description="Estimate of verification progress [0..1]"
    )
difficulty: int = Query(..., description="The current difficulty")
size_on_disk: int = Query(
..., description="The estimated size of the block and undo files on disk"
)
networks: List[BtcNetwork] = Query(
[], description="Which networks are in use (ipv4, ipv6 or onion)"
)
version: int = Query(..., description="The bitcoin core server version")
subversion: str = Query(..., description="The server subversion string")
connections_in: int = Query(..., description="The number of inbound connections")
connections_out: int = Query(..., description="The number of outbound connections")
@classmethod
def from_rpc(cls, binfo: BlockchainInfo, ninfo: NetworkInfo):
return cls(
blocks=binfo.blocks,
headers=binfo.headers,
verification_progress=binfo.verification_progress,
difficulty=binfo.difficulty,
size_on_disk=binfo.size_on_disk,
networks=ninfo.networks,
version=ninfo.version,
subversion=ninfo.subversion,
connections_in=ninfo.connections_in,
connections_out=ninfo.connections_out,
)
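# --- Usage sketch (added for illustration; not part of the original module) ---
# Assuming some JSON-RPC client for bitcoind (a hypothetical `rpc` object whose
# .call() returns the raw result dicts of `getblockchaininfo` / `getnetworkinfo`),
# the models above would typically be combined like this:
#
#     blockchain_info = BlockchainInfo.from_rpc(rpc.call("getblockchaininfo"))
#     network_info = NetworkInfo.from_rpc(rpc.call("getnetworkinfo"))
#     btc_info = BtcInfo.from_rpc(blockchain_info, network_info)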
|
py | 7dfe5b9d9da0bab64b352772db4752dd63ff0653 | from todo.application.task.repository import TaskRepository
from todo.infrastructure.task.repository import DatastoreTaskRepository
def bind_todo(binder):
binder.bind(TaskRepository, to=DatastoreTaskRepository)
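# Usage sketch (assumes the `injector` package, whose Binder API this function
# targets): pass bind_todo as a module when constructing the container, so that
# requests for the abstract TaskRepository resolve to the Datastore implementation.
#
#     from injector import Injector
#
#     container = Injector([bind_todo])
#     repo = container.get(TaskRepository)  # a DatastoreTaskRepository instance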
|
py | 7dfe5bf6afbd0384a34edb560f965f965429336c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 14:01:16 2020
@author: rahulr
"""
from oab.shape.Shape import Shape
from math import sin, cos, pi
class Square(Shape):
    def populate_coords(self):
        """Return the four corner points of the square, sampled on a circle of
        radius `sides` around the origin at 90-degree steps from `angle`."""
        sides = self.sides
        reso = 2 * pi / 4  # angular step between consecutive corners (90 degrees)
        ptX = self.origin[0]
        ptY = self.origin[1]
        start_ang = self.angle
        coords = []
        for i in range(4):
            X = ptX + sides * cos(start_ang + i * reso)
            Y = ptY + sides * sin(start_ang + i * reso)
            coords.append([X, Y])
return coords |
py | 7dfe5c2497c888522cfbf3504f81dca16e86a30b | from walt.server.tools import columnate, format_node_models_list
MSG_WS_IS_EMPTY = """\
Your working set is empty.
Use 'walt image search [<keyword>]' to search for images
you could build upon.
Then use 'walt image clone <clonable_link>' to clone them
into your working set.
"""
def show(db, docker, images, requester, refresh):
username = requester.get_username()
if not username:
return None # client already disconnected, give up
if refresh:
images.refresh()
tabular_data = []
for image in images.values():
if image.user != username:
continue
created_at = image.created_at
node_models = image.get_node_models()
tabular_data.append([
image.name,
str(image.in_use),
created_at if created_at else 'N/A',
str(image.ready),
format_node_models_list(node_models) if node_models else 'N/A'])
if len(tabular_data) == 0:
# new user, try to make his life easier by cloning
# default images of node models present on the platform.
node_models = set(n.model for n in db.select('nodes'))
if len(node_models) == 0: # no nodes
return MSG_WS_IS_EMPTY
requester.set_busy_label('Cloning default images')
for model in node_models:
default_image = images.get_default_image_fullname(model)
ws_image = username + '/' + default_image.split('/')[1]
docker.local.tag(default_image, ws_image)
images.register_image(ws_image, True)
requester.set_default_busy_label()
# restart the process
return show(db, docker, images, requester, refresh)
header = [ 'Name', 'In-use', 'Created', 'Ready', 'Compatibility' ]
return columnate(tabular_data, header)
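# Illustrative call (a sketch; the real entry point lives elsewhere in walt-server):
# a request handler would invoke something like
#
#     table = show(db, docker, images, requester, refresh=False)
#
# and send the resulting string (a columnate()-formatted table with the
# Name / In-use / Created / Ready / Compatibility columns) back to the client.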
|