# the-stack_0_17872
import multiprocessing as mp
from typing import Iterable
from dynesty.dynesty import _function_wrapper
from emcee.ensemble import _FunctionWrapper
from .process import AbstractJob, Process
def _is_likelihood_function(
function
) -> bool:
"""
Is the function a callable used to evaluate likelihood?
Naturally in Autofit this would be a child of the Fitness class.
In Dynesty the likelihood function is wrapped in _function_wrapper
and called 'loglikelihood'
Parameters
----------
function
Some object
Returns
-------
Is the object a log likelihood function?
"""
from autofit.non_linear.abstract_search import NonLinearSearch
return any([
isinstance(
function,
NonLinearSearch.Fitness
),
isinstance(
function,
_function_wrapper
) and function.name == 'loglikelihood',
isinstance(
function,
_FunctionWrapper
)
])
class SneakyJob(AbstractJob):
def __init__(self, function, *args):
"""
A job performed on a process.
If the function is the log likelihood function then it is set to None.
        If the log likelihood function is in the args, it is filtered out of the args,
        but its index is retained.
        This prevents the large amounts of data contained in an Analysis class from being
        copied over to the processes multiple times.
Parameters
----------
function
Some function to which a pool.map has been applied
args
The arguments to that function
"""
super().__init__()
if _is_likelihood_function(function):
self.function = None
else:
self.function = function
self.args = list()
self.fitness_index = None
for i, arg in enumerate(args):
if _is_likelihood_function(
arg
):
if self.fitness_index is not None:
raise AssertionError(
f"Two arguments of type NonLinear.Fitness passed to {function.__name__}"
)
self.fitness_index = i
else:
self.args.append(arg)
def perform(self, likelihood_function):
"""
Computes the log likelihood. The likelihood function
is passed from a copy associated with the current process.
Depending on whether the likelihood function itself is
being mapped, or some function mapped onto the likelihood
function as an argument, the likelihood function will be
called or added to the arguments.
Parameters
----------
likelihood_function
A likelihood function associated with the processes
to avoid copying data for every single function call
Returns
-------
The log likelihood
"""
if self.function is None:
return likelihood_function(
self.args
)
args = (
self.args[:self.fitness_index]
+ [likelihood_function]
+ self.args[self.fitness_index:]
)
return self.function(
args
)
class StopCommand:
"""
A command that can be passed into a process queue to gracefully stop
the process
"""
class SneakyProcess(Process):
def run(self):
"""
Run this process, completing each job in the job_queue and
passing the result to the queue.
The process continues to execute until a StopCommand is passed.
        This occurs when the SneakyPool goes out of scope.
"""
self._init()
self.logger.debug("starting")
while True:
job = self.job_queue.get()
if job is StopCommand:
break
try:
self.queue.put(
job.perform(
*self.job_args
)
)
except Exception as e:
self.queue.put(e)
self.logger.debug("terminating process {}".format(self.name))
self.job_queue.close()
class SneakyPool:
def __init__(
self,
processes: int,
fitness,
initializer=None,
initargs=None
):
"""
        Implements the same interface as multiprocessing's Pool,
        but associates the fitness object with each process to
        prevent data being copied to each process for every function
        call.
Parameters
----------
processes
The number of cores to be used simultaneously.
fitness
A class comprising data and a model which can be used
to evaluate the likelihood of a live point
initializer
initargs
"""
self.job_queue = mp.Queue()
self.processes = [
SneakyProcess(
str(number),
self.job_queue,
initializer=initializer,
initargs=initargs,
job_args=(fitness,)
)
for number in range(processes)
]
for process in self.processes:
process.start()
def map(self, function, args_list):
"""
Execute the function with the given arguments across all of the
        processes. The likelihood function argument is removed from each set of
        args in the args_list.
Parameters
----------
function
Some function
args_list
An iterable of iterables of arguments passed to the function
Yields
------
Results from the function evaluation
"""
jobs = [
SneakyJob(
function,
*(
(args,) if not isinstance(
args,
Iterable
) else tuple(args)
)
) for args in args_list
]
for job in jobs:
self.job_queue.put(
job
)
target = len(jobs)
count = 0
exception = None
while count < target:
for process in self.processes:
if not process.queue.empty():
item = process.queue.get()
count += 1
if isinstance(
item,
Exception
):
exception = item
else:
yield item
if exception is not None:
raise exception
def __del__(self):
"""
Called when the map goes out of scope.
        Tell each process to terminate with a StopCommand and then join
        each process with a short timeout (0.5 s).
"""
for _ in range(len(self.processes)):
self.job_queue.put(StopCommand)
for process in self.processes:
process.join(0.5)
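# Minimal usage sketch (not part of the original module; `fitness` and `some_function`
# are hypothetical placeholders). The pool is constructed once with the heavy fitness
# object; map() then behaves like multiprocessing.Pool.map, while each SneakyJob strips
# the fitness object out of the transferred arguments and re-injects the per-process copy:
#
#     pool = SneakyPool(processes=4, fitness=fitness)
#     for result in pool.map(some_function, [(fitness, 1), (fitness, 2)]):
#         print(result)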
# the-stack_0_17874
import csv
import json
import random
import requests
from requests.exceptions import Timeout
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from backend.core.BGPtopology import BGPtopology
from backend.api.SimulationWorker import SimulationWorker
from backend.api.SimulationPrinter import SimulationPrinter
from mpipe import UnorderedWorker
class SimulationConstructor(UnorderedWorker):
'''
    Performs RPKI Route Origin Validation by querying the Routinator's (open-source RPKI Relying Party software)
    HTTP API endpoint running on a server (e.g., localhost on port 9556).
    It concatenates the endpoint_url, origin_asn and prefix arguments into a single URL string and sends a GET request to the API.
    IF the returned HTTP status code is 200:
        return the validity state of this route announcement (valid, invalid, or not found)
    ELSE:
        return the HTTP status code (we don't have any data that indicates the validity of the route announcement)
Input arguments:
(a) endpoint_url: the endpoint's URL which is used for Route Origin Validation
e.g., in our case http://localhost:9556/api/v1/validity/
(b) origin_asn: the origin AS number of the route announcement (in AS_PATH)
(c) prefix: the prefix of the route announcement
Returns:
The validity state of this route announcement (valid, invalid, or not found)
IF the returned HTTP status code is 200, ELSE the HTTP status code
'''
def do_rov(self, endpoint_url, origin_asn, prefix):
url = endpoint_url + str(origin_asn) + "/" + prefix
routinator_adapter = HTTPAdapter(max_retries=3)
session = requests.Session()
# Use `routinator_adapter` for all requests to endpoints that start with the endpoint_url argument
session.mount(endpoint_url, routinator_adapter)
try:
response = session.get(url, timeout=3)
except ConnectionError as ce:
print(ce)
except Timeout:
print('The request timed out')
else:
# print('The request did not time out')
if response.status_code == 200:
# Successful GET request
# print(response.json())
return response.json()["validated_route"]["validity"]["state"]
else:
                # The HTTP response does not contain useful data for the ROV
return response.status_code
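    # Illustrative call (hypothetical values), assuming a Routinator instance listens on
    # localhost:9556: self.do_rov("http://localhost:9556/api/v1/validity/", 3333, "193.0.0.0/21")
    # sends a GET to http://localhost:9556/api/v1/validity/3333/193.0.0.0/21 and, on a 200
    # response, returns the reported validity state (e.g. "valid", "invalid" or "not-found").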
def load_ROV_Deployment_monitor_data(self, file_path):
asn_do_rov_list = []
with open(file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
line_count = 0
for row in csv_reader:
if line_count == 0:
print(f'Columns names are {", ".join(row)}')
line_count += 1
else:
print("ASN: " + row[0], "AS Name: " + row[1], "Certainty: " + row[2])
if float(row[2]) >= 0.5:
asn_do_rov_list.append(int(row[0]))
line_count += 1
print(f'Processed: {line_count} lines.')
print(asn_do_rov_list)
return asn_do_rov_list
def load_ROV_Active_Measurements_data(self, file_path):
with open(file_path) as json_file:
data = json.load(json_file)
# use the Total Unique ROV (Fully+Partially filtering) result
asn_do_rov_list = [int(item) for item in data["2"][129]]
print(asn_do_rov_list)
return asn_do_rov_list
def load_ASRank_data(self, file_path):
with open(file_path) as json_file:
data = json.load(json_file)
return data
def generate_rpki_rov_list(self, num_of_top_isp_rpki_adopters, rpki_adoption_propability, top_500_ASRank_ASNs):
n_top_ISPs = int(num_of_top_isp_rpki_adopters / rpki_adoption_propability)
set_of_n_top_ISPs = top_500_ASRank_ASNs["data"]["asns"]["edges"][0:n_top_ISPs]
list_of_n_top_ASNs = []
for item in set_of_n_top_ISPs:
list_of_n_top_ASNs.append(int(item["node"]["asn"]))
return random.sample(list_of_n_top_ASNs, num_of_top_isp_rpki_adopters)
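    # Worked example (hypothetical numbers): with num_of_top_isp_rpki_adopters=100 and
    # rpki_adoption_propability=0.5, the 200 highest-ranked ASes are taken from the ASRank
    # data and 100 of them are drawn uniformly at random as the ROV-adopting ISPs.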
def set_rpki_rov(self, Topo, sim_data):
if sim_data['rpki_rov_mode'] == "all":
print("RPKI ROV mode --> all")
for asn in Topo.get_all_nodes_ASNs():
Topo.get_node(asn).rov = True
elif sim_data['rpki_rov_mode'] == "rov_deployment_monitor":
print("RPKI ROV mode --> rov_deployment_monitor")
rov_deployment_monitor_list = self.load_ROV_Deployment_monitor_data("../datasets/ROV-Deployment-Monitor/2020-08-31.csv")
for asn in rov_deployment_monitor_list:
if Topo.has_node(asn):
Topo.get_node(asn).rov = True
elif sim_data['rpki_rov_mode'] == "rov_active_measurements":
print("RPKI ROV mode --> rov_active_measurements")
rov_active_measurements_list = self.load_ROV_Active_Measurements_data("../datasets/ROV-Active-Measurements-TMA-Paper/20210719_resultset_asns.json")
for asn in rov_active_measurements_list:
if Topo.has_node(asn):
Topo.get_node(asn).rov = True
elif sim_data['rpki_rov_mode'] == "manual":
print("RPKI ROV mode --> manual")
top_500_ASRank_ASNs = self.load_ASRank_data("../datasets/ASRank/top_500_ASNs.json")
top_random_ISPs_list = self.generate_rpki_rov_list(sim_data['num_of_top_isp_rpki_adopters'], sim_data['rpki_adoption_propability'], top_500_ASRank_ASNs)
print(top_random_ISPs_list)
for asn in top_random_ISPs_list:
if Topo.has_node(asn):
Topo.get_node(asn).rov = True
elif sim_data['rpki_rov_mode'] == "today_rov_status+other_random_prop":
print("RPKI ROV mode --> today_rov_status+other_random_prop")
rov_active_measurements_list = self.load_ROV_Active_Measurements_data("../datasets/ROV-Active-Measurements-TMA-Paper/20210719_resultset_asns.json")
tmp_rov_list = [item for item in Topo.get_all_nodes_ASNs() if item not in rov_active_measurements_list]
if sim_data['other_random_prop'] == 0:
other_rov_list = []
else:
other_rov_list = random.sample(tmp_rov_list, int(len(tmp_rov_list) * sim_data['other_random_prop']))
final_rov_list = rov_active_measurements_list + other_rov_list
for asn in final_rov_list:
if Topo.has_node(asn):
Topo.get_node(asn).rov = True
elif sim_data['rpki_rov_mode'] == "top_isps_rov+other_random_prop":
print("RPKI ROV mode --> top_isps_rov+other_random_prop")
top_500_ASRank_ASNs = self.load_ASRank_data("../datasets/ASRank/top_500_ASNs.json")
top_rov_ISPs_list = self.generate_rpki_rov_list(sim_data['num_of_top_isp_rpki_adopters'],
sim_data['rpki_adoption_propability'],
top_500_ASRank_ASNs)
tmp_rov_list = [item for item in Topo.get_all_nodes_ASNs() if item not in top_rov_ISPs_list]
if sim_data['other_random_prop'] == 0:
other_rov_list = []
else:
other_rov_list = random.sample(tmp_rov_list, int(len(tmp_rov_list) * sim_data['other_random_prop']))
final_rov_list = top_rov_ISPs_list + other_rov_list
for asn in final_rov_list:
if Topo.has_node(asn):
Topo.get_node(asn).rov = True
elif sim_data['rpki_rov_mode'] == "random_20":
print("RPKI ROV mode --> Random 20%")
all_BGP_nodes_list = Topo.get_all_nodes_ASNs()
random_20_BGP_nodes_list = random.sample(all_BGP_nodes_list, int(len(all_BGP_nodes_list) * 0.2))
for asn in random_20_BGP_nodes_list:
if Topo.has_node(asn):
Topo.get_node(asn).rov = True
return
def set_rpki_rov_table(self, Topo, sim_data, validator_url):
        # In type 1,2,3,...,N hijacks, the origin AS, in the AS_PATH that the hijacker announces to its neighbors,
# is always the victim AS !!! For this reason, the rov_table contains only entries for the hijacker, victim and helper ASes
# Outdated --> (Furthermore, we assume that the victim and helper ASes mitigate the subprefix attack by announcing the same subprefix
# as the hijacker (e.g., the hijacker announces the longest subprefix that is permissible)).
rpki_rov_table = {}
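        # The table is keyed by (origin_asn, prefix) tuples and stores one RPKI validity state
        # per announcement, e.g. (hypothetical entry) rpki_rov_table[(65001, '10.0.0.0/24')] = 'valid'.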
if sim_data['realistic_rpki_rov'] == False:
print("Hypothetical ROV")
rpki_rov_table[(sim_data['legitimate_AS'], sim_data['legitimate_prefix'])] = random.choice(["valid", "not-found"])
rpki_rov_table[(sim_data['legitimate_AS'], sim_data['hijacker_prefix'])] = random.choice(["valid", "not-found"]) #useful for type 1, 2, 3 ..., N attacks
rpki_rov_table[(sim_data['legitimate_AS'], sim_data['mitigation_prefix'])] = random.choice(["valid", "not-found"])
rpki_rov_table[(sim_data['hijacker_AS'], sim_data['hijacker_prefix'])] = random.choice(["invalid", "not-found"])
for helper in sim_data['anycast_ASes']:
rpki_rov_table[(helper, sim_data['mitigation_prefix'])] = random.choice(["valid", "not-found"])
else:
print("Realistic ROV")
AS_to_validate = []
AS_to_validate.append((sim_data['legitimate_AS'], sim_data['legitimate_prefix']))
AS_to_validate.append((sim_data['legitimate_AS'], sim_data['hijacker_prefix'])) #useful for type 1, 2, 3 ..., N attacks
AS_to_validate.append((sim_data['legitimate_AS'], sim_data['mitigation_prefix']))
AS_to_validate.append((sim_data['hijacker_AS'], sim_data['hijacker_prefix']))
for helper in sim_data['anycast_ASes']:
AS_to_validate.append((helper, sim_data['mitigation_prefix']))
AS_to_validate = list(set([i for i in AS_to_validate])) #remove duplicates from the list
for item in AS_to_validate:
origin_AS = item[0]
origin_prefix = item[1]
validity_state = self.do_rov(validator_url, origin_AS, origin_prefix)
rpki_rov_table[(origin_AS, origin_prefix)] = validity_state
'''
Set the rpki rov table, only if the BGPnode performs ROV
'''
for asn in Topo.get_all_nodes_ASNs():
if Topo.get_node(asn).rov == True:
Topo.get_node(asn).rpki_validation = rpki_rov_table
for entry in rpki_rov_table:
print(entry, rpki_rov_table[entry])
return rpki_rov_table
def load_create_Topology(self, Topo, sim_data):
'''
load and create topology
'''
print('Loading topology...')
Topo.load_topology_from_csv(
'../datasets/CAIDA AS-graph/serial-2/' + sim_data['caida_as_graph_dataset'] + '.as-rel2.txt')
Topo.load_ixps_from_json('../datasets/CAIDA IXPS/' + 'ixs_' + sim_data['caida_ixps_datasets'] + '.jsonl',
'../datasets/CAIDA IXPS/' + 'ix-asns_' + sim_data['caida_ixps_datasets'] + '.jsonl')
Topo.add_extra_p2p_custom_links()
def doTask(self, task_data):
sim_data = task_data["sim_data"]
simulation_uuid = task_data["simulation_uuid"]
'''
load and create topology
'''
Topo = BGPtopology()
self.load_create_Topology(Topo, sim_data)
'''
Set the ASes that are going to do RPKI Route Origin Validation,
according to user preference (rpki_rov_mode)
'''
self.set_rpki_rov(Topo, sim_data)
'''
        Set the RPKI ROV table for each AS that does ROV,
according to user preference (realistic_rpki_rov -> realistic or hypothetical)
'''
rpki_rov_table = self.set_rpki_rov_table(Topo, sim_data, "http://localhost:9556/api/v1/validity/")
'''
Launch simulation
'''
sw = SimulationWorker()
sw.start(Topo, sim_data, rpki_rov_table, simulation_uuid)
'''
Update some statistic fields in db for the simulation and save simulation results into json file
(only if, it is the last repetition of all simulations)
'''
sp = SimulationPrinter()
        sp.save_statistics(simulation_uuid, sim_data)
# the-stack_0_17875
from django.db import models
from django.utils import timezone
#from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.contrib.auth.models import User
#------------------------------
#------ USERS
#------------------------------
"""
class CustomUserManager(BaseUserManager):
def _create_user(self, email, password,
is_staff, is_superuser, **extra_fields):
""
Creates and saves a User with the given email and password.
""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, False,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True,
**extra_fields)
class InvestigaUser(AbstractBaseUser, PermissionsMixin):
first_name = models.CharField('First Name', max_length=200)
last_name = models.CharField('Last Name', max_length=200)
email = models.EmailField('Email', max_length=254, unique=True)
is_staff = models.BooleanField('staff', default=False,
help_text='Designates whether the user can log into this admin '
)
is_active = models.BooleanField('active', default=True,
help_text='Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.')
date_joined = models.DateTimeField('Fecha de inscripcion', default=timezone.now)
objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name','last_name']
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
return self.first_name
def __str__(self):
return self.email
"""
#------------------------------
#------------------------------
#------ START class Scientist
class Scientist(models.Model):
ACADEMIC_DEGREES = (
('MAG','Master'),
('PHD','PhD'),
('MD','MD'),
)
OCCUPATION = (
('PHD_EST','PhD Student'),
('REAS_ASS','Research Assistant'),
('POST_DOC','Postdoctoral Researcher'),
('REAS_DIR','Research Lab. Director'),
('REAS','Researcher'),
('PROF','Professor'),
('R&D_ENG','R&D Engineer'),
('IND','Independent Researcher'),
)
user = models.OneToOneField(User)
academic_degree = models.CharField(max_length=10,choices=ACADEMIC_DEGREES,verbose_name='Academic Degree',blank=False)
academic_field = models.ForeignKey('AcademicDiscipline')
country_residence = models.ForeignKey('Country')
current_occupation = models.CharField(max_length=40,verbose_name='Occupation',blank=False)
working_place = models.CharField(max_length=40,verbose_name='Working Place',blank=False)
languages = models.CharField(max_length=50,verbose_name='Languages', blank=True)
personal_website = models.CharField(max_length=30, verbose_name='Website', blank=True)
researchgate_profile = models.CharField(max_length=30, verbose_name='ResearchGate', blank=True)
linkedin_profile = models.CharField(max_length=30, verbose_name='LinkedIn', blank=True)
twitter_username = models.CharField(max_length=15, verbose_name='Twitter', blank=True)
facebook_profile = models.CharField(max_length=30, verbose_name='Facebook', blank=True)
referenced_by = models.CharField(max_length=150,verbose_name='Referenced by',blank=False)
is_verified = models.BooleanField(verbose_name="Verified Researcher?")
def __str__(self):
        return '%s %s' % (self.user.first_name, self.user.last_name)
#------ END class Scientist
#------------------------------
#------------------------------
#------ START class SchoolTeacher
class SchoolTeacher(models.Model):
user = models.OneToOneField(User)
school = models.ForeignKey('School')
degree = models.CharField(max_length=20,verbose_name='Degree',blank=False)
subjects = models.CharField(max_length=40,verbose_name='Subjects',blank=False)
telephone = models.CharField(max_length=25, verbose_name='Telephone')
twitter_username = models.CharField(max_length=15, verbose_name='Twitter', blank=True)
facebook_profile = models.CharField(max_length=30, verbose_name='Facebook', blank=True)
has_IT_training = models.BooleanField(verbose_name="Has IT training?")
def __str__(self):
        return '%s %s' % (self.user.first_name, self.user.last_name)
#------ END class SchoolTeacher
#------------------------------
#------------------------------
#------ START class Moderator
class Moderator(models.Model):
user = models.OneToOneField(User)
report = models.CharField(max_length=500,verbose_name='Report',blank=False)
def __str__(self):
        return '%s %s' % (self.user.first_name, self.user.last_name)
#------ END class Moderator
#------------------------------
#------ SCIENCE
#------------------------------
#------ START class ScienceField
class ScienceField(models.Model):
SCIENCE_FIELD = (
('LIFE','Life Sciences'),
('APPL','Applied Sciences'),
('SOCIAL','Social Sciences'),
('PHYS','Physical Sciences'),
)
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30, choices=SCIENCE_FIELD, verbose_name='Science Field')
description = models.CharField(max_length=300,verbose_name='Description')
def __str__(self):
return self.name
#------ END class ScienceField
#------------------------------
#------------------------------
#------ START class AcademicDiscipline
class AcademicDiscipline(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50, verbose_name='Academic Discipline')
academic_discipline = models.ForeignKey('ScienceField')
def __str__(self):
return self.name
#------ END class AcademicDiscipline
#------------------------------
#------------------------------
#------ START class Country
class Country(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=50, verbose_name='Country')
def __str__(self):
return self.name
#------ END class Country
#------------------------------
#------------------------------
#------------------------------
#------ SCHOOLS
#------------------------------
#------ START class School
class School(models.Model):
SCHOOL_LEVELS = (
('ELEMS','Elementary School'),
('PRIMS','Primary School'),
('SECUS','Secondary School'),
)
# Location
department = models.CharField(max_length=30,verbose_name='Department')
# Town
town = models.CharField(max_length=30,verbose_name='Town')
max_level = models.CharField(max_length=20,choices=SCHOOL_LEVELS,verbose_name='Max. level')
director_fullname = models.CharField(max_length=25, verbose_name='Full name School director')
address = models.CharField(max_length=300,verbose_name='Address')
telephone = models.CharField(max_length=25, verbose_name='Telephone')
email = models.EmailField(max_length=254, verbose_name='School email')
facebook_url = models.CharField(max_length=20, verbose_name='Facebook', blank=True)
school_website = models.CharField(max_length=20, verbose_name='Website', blank=True)
has_projector = models.BooleanField(verbose_name="Has projector?")
has_screen = models.BooleanField(verbose_name="Has screen?")
has_webcam = models.BooleanField(verbose_name="Has webcam?")
has_videoconf_room = models.BooleanField(verbose_name="Has videoconf room?")
has_microphone = models.BooleanField(verbose_name="Has microphone?")
has_speakers = models.BooleanField(verbose_name="Has speakers?")
def __str__(self):
return self.user.username
#------ END class School
#------------------------------
#------------------------------
#------------------------------
#------ SESSIONS
#------------------------------
#------ START class VideoconfSession
class VideoconfSession(models.Model):
STATUS_SCHOOL = (
('CANCEL','Cancelled'),
('VER_REQ','To verify requirements'),
('TO_SCH','To be scheduled'),
('SCHED','Scheduled'),
('IN_PROG','In progress'),
('COMPLET','Completed'),
)
STATUS_SCIENTIST = (
('CANCEL','Cancelled'),
('READING','Preparing reading'),
('TRAIN','Training'),
('TO_SCH','To be scheduled'),
('SCHED','Scheduled'),
('IN_PROG','In progress'),
('COMPLET','Completed'),
)
id = models.AutoField(primary_key=True)
selected_date = models.DateField()
selected_hour = models.DateTimeField()
status_school = models.CharField(max_length=7, choices=STATUS_SCHOOL, verbose_name='Status school')
status_scientist = models.CharField(max_length=7, choices=STATUS_SCIENTIST, verbose_name='Status scientist')
school_teacher = models.ForeignKey('SchoolTeacher')
scientist = models.ForeignKey('Scientist')
moderator = models.ForeignKey('Moderator')
#------ END class VideoconfSession
#------------------------------
#------------------------------
# the-stack_0_17876
# -*- coding: utf-8 -*-
#
# Copyright 2019 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Third party data registry integration."""
from urllib.parse import urlparse
from renku.cli._providers.zenodo import ZenodoProvider
from renku.utils.doi import is_doi
class ProviderFactory:
"""Create a provider type from URI."""
PROVIDERS = {'zenodo': ZenodoProvider}
@staticmethod
def from_uri(uri):
"""Get provider type based on uri."""
is_doi_ = is_doi(uri)
if is_doi_ is False:
url = urlparse(uri)
if bool(url.scheme and url.netloc and url.params == '') is False:
return None, 'Cannot parse URL.'
provider = None
if 'zenodo' in uri:
provider = ZenodoProvider(is_doi=is_doi_)
if is_doi_ and provider is None:
return None, (
'Provider {} not found. '.format(
uri.split('/')[1].split('.')[0] # Get DOI provider name.
) + 'Currently supporting following providers: (Zenodo, )'
)
return provider, None
@staticmethod
def from_id(provider_id):
"""Get provider type based on identifier."""
return ProviderFactory.PROVIDERS[provider_id]()
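# Illustrative (hypothetical) calls, assuming is_doi() matches DOI-like strings:
#   ProviderFactory.from_uri("10.5281/zenodo.1234")  ->  (ZenodoProvider(...), None)
#   ProviderFactory.from_id("zenodo")                ->  ZenodoProvider()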
# the-stack_0_17878
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Christopher MacGown
#
# Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
# http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
# http://opensource.org/licenses/MIT>, at your option. This file may not be
# copied, modified, or distributed except according to those terms.
#
#
# pylint: disable=missing-docstring
import contextlib
import fcntl
import os
import sys
import re
import time
import unittest
ERROR = None
try:
import liblambda
except ImportError as err:
ERROR = err
liblambda = None
@contextlib.contextmanager
def capture_stdout():
'''capture_stdout()
Store the stdout file descriptor and redirect it to the write-side of
the created pipe. We put the read-side into non-blocking mode so that
we can capture the output later.
This is only necessary because Rust writes directly to the stdout fd
and doing the typical sys.stdout song and dance in Python doesn't
capture things coming out of `liblambda`.
This is a context-manager and it yields the read-side of the pipe, so
it needs to be read and closed (for good housekeeping).
'''
# Setup pipe
read, write = os.pipe()
read, write = os.fdopen(read, 'rb'), os.fdopen(write, 'wb')
fcntl.fcntl(read, fcntl.F_SETFL, os.O_NONBLOCK)
fd = sys.stdout.fileno() # pylint: disable=invalid-name
with os.fdopen(os.dup(fd), 'wb') as copied:
sys.stdout.flush()
os.dup2(write.fileno(), fd) # redirect STDOUT -> the write FD.
try:
yield read
finally:
sys.stdout.flush()
os.dup2(copied.fileno(), fd) # redirect back.
write.close()
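# Minimal usage sketch (mirrors what the test cases below do):
#   with capture_stdout() as stdout:
#       ...                              # anything written to fd 1 lands in the pipe
#   output = consume(stdout)             # consume() (defined below) reads and closes it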
def now_in_millis():
return int(round(time.time() * 1000.0))
class FakeContext(object): # pylint: disable=too-few-public-methods,too-many-instance-attributes
def __init__(self, timeout=3000):
self._start = now_in_millis()
self._timeout = timeout
self.function_name = 'fake_function'
self.function_version = '$LATEST'
self.invoked_function_arn = 'arn:aws:lambda:XX-TEST-1:999999999999:function:fake_function' # pylint: disable=line-too-long
self.memory_limit_in_mb = '128'
self.aws_request_id = '1f8958d8-b20b-4a3c-b8fb-78896d10a9e5'
self.log_group_name = '/aws/lambda/fake_function'
# Date and coordinates of the Battle of Hastings - just for test data.
self.log_stream_name = '1066/10/14/[$LATEST]00000000000000000000505443002915' # pylint: disable=line-too-long
def get_remaining_time_in_millis(self):
return self._timeout - (now_in_millis() - self._start)
def consume(reader):
try:
return reader.read().decode(sys.stdout.encoding)
finally:
reader.close()
class TestCrowbar(unittest.TestCase):
def setUp(self):
self.context = FakeContext(timeout=100)
if not getattr(self, 'assertRegex', None):
# assertRegexpMatches is deprecated in 3.6, so make sure python2.7
# calls the method the same thing.
setattr(self, 'assertRegex', self.assertRegexpMatches)
def test_00_import_liblambda(self): # pylint: disable=no-self-use
# This makes the import failure a little friendlier.
if liblambda is None:
print("Could not import liblambda: {}".format(ERROR))
@unittest.skipIf(liblambda is None, "Could not import liblambda")
@unittest.skipUnless(os.environ["EXAMPLE"] == "echo", "DISABLED")
def test_01_echo_short_timeout(self):
expectation = re.compile(r'hello cloudwatch logs from (?P<name>\w+) '
r'version (?P<version>\${0,1}\w+),'
r'.(?P<time>\d+) ms.*')
time.sleep(0.01)
with capture_stdout() as stdout:
self.assertEqual(liblambda.handler("echo", self.context), "echo")
output = consume(stdout)
matches = re.match(expectation, output)
self.assertRegex(output, expectation, "")
self.assertIn(int(matches.group('time')),
[87, 88, 89, 90],
"{} not in [87, 88, 89, 90]".format(matches.group('time')))
@unittest.skipIf(liblambda is None, "Could not import liblambda")
@unittest.skipUnless(os.environ["EXAMPLE"] == "ec2_regions", "DISABLED")
def test_01_ec2_regions_short_timeout(self): # pylint: disable=invalid-name
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
with capture_stdout() as stdout:
self.assertEqual(liblambda.handler("list-regions", self.context),
['ap-south-1',
'eu-west-2',
'eu-west-1',
'ap-northeast-2',
'ap-northeast-1',
'sa-east-1',
'ca-central-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',])
output = consume(stdout)
self.assertEqual(output, "", "Unexpected STDOUT output")
@unittest.skipIf(liblambda is None, "Could not import liblambda")
@unittest.skipUnless(os.environ["EXAMPLE"] == "echo", "DISABLED")
def test_02_echo_long_timeout(self):
# This test is a duplicate of test_01_echo, but with a longer deadline. Not necessarily
# the most exhaustive method of testing, but I wanted to show that.
expectation = re.compile(r'hello cloudwatch logs from (?P<name>\w+) '
r'version (?P<version>\${0,1}\w+),'
r'.(?P<time>\d+) ms.*')
context = FakeContext() # 3 seconds.
time.sleep(0.001)
with capture_stdout() as stdout:
self.assertEqual(liblambda.handler("echo", context), "echo")
output = consume(stdout)
matches = re.match(expectation, output)
self.assertRegex(output, expectation, "unexpected")
self.assertIn(int(matches.group('time')),
[2998, 2999, 3000],
"{} not in [2998, 2999, 3000]".format(matches.group('time')))
@unittest.skipIf(liblambda is None, "Could not import liblambda")
@unittest.skipUnless(os.environ["EXAMPLE"] == "ec2_regions", "DISABLED")
def test_02_ec2_regions_long_timeout(self): # pylint: disable=invalid-name
context = FakeContext() # 3 seconds.
time.sleep(0.001)
with capture_stdout() as stdout:
self.assertEqual(liblambda.handler("list-regions", context),
['ap-south-1',
'eu-west-2',
'eu-west-1',
'ap-northeast-2',
'ap-northeast-1',
'sa-east-1',
'ca-central-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',])
output = consume(stdout)
self.assertEqual(output, "", "Unexpected STDOUT output")
if __name__ == '__main__':
unittest.main()
# the-stack_0_17879
"""Testing v0x04 FlowRemoved message."""
from pyof.v0x04.asynchronous.flow_removed import FlowRemoved, FlowRemovedReason
from pyof.v0x04.common.flow_match import (
Match, MatchType, OxmClass, OxmOfbMatchField, OxmTLV)
from tests.test_struct import TestStruct
class TestFlowRemovedMsg(TestStruct):
"""FlowRemoved message tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_flow_removed')
super().set_raw_dump_object(FlowRemoved, xid=0,
cookie=0x0000000000000000, priority=1000,
reason=FlowRemovedReason.OFPRR_DELETE,
table_id=0, duration_sec=77,
duration_nsec=559000000, idle_timeout=0,
hard_timeout=0, packet_count=0,
byte_count=0, match=_new_match())
super().set_minimum_size(56)
def _new_match():
"""Crate new Match instance."""
tlv1 = OxmTLV(oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,
oxm_field=OxmOfbMatchField.OFPXMT_OFB_ETH_TYPE,
oxm_hasmask=False, oxm_value=b'\x88\xcc')
tlv2 = OxmTLV(oxm_class=OxmClass.OFPXMC_OPENFLOW_BASIC,
oxm_field=OxmOfbMatchField.OFPXMT_OFB_VLAN_VID,
oxm_hasmask=False, oxm_value=b'\x1e\xd7')
return Match(match_type=MatchType.OFPMT_OXM,
oxm_match_fields=[tlv1, tlv2])
# the-stack_0_17881
print('===== DESAFIO 69 =====')
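# Exercise 69 (Portuguese prompts): keep registering people (age and sex) until the user
# chooses to stop, then report how many people are 18 or older, how many men were
# registered, and how many women are under 20 years old.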
maiorid = homem = mulher = 0
while True:
print('-' * 30)
print(' CADASTRE UMA PESSOA ')
print('-' * 30)
idade = int(input('IDADE: '))
sexo = ' '
while sexo not in 'MF':
sexo = str(input('SEXO [M/F]: ')).strip().upper()
continua = ' '
while continua not in 'SN':
continua = str(input('QUER CONTINUAR? [S/N]: ')).strip().upper()
if idade >= 18:
maiorid += 1
if sexo == 'M':
homem += 1
if sexo == 'F' and idade <= 20:
mulher += 1
if continua == 'N':
break
print('===== FIM DO PROGRAMA =====')
print(f'Total de pessoas com mais de 18 anos: {maiorid}')
print(f'Ao todo temos {homem} homens cadastrados')
print(f'Temos {mulher} mulheres com menos de 20 anos de idade')
# the-stack_0_17882
import pytest
import sympy as sp
from qbee import EquationSystem, derivatives, polynomialize
x, y, z = sp.symbols('x, y, z')
dot_x, dot_y, dot_z = derivatives('x, y, z')
def assert_check_poly(expected_system: EquationSystem, actual_system: EquationSystem):
try:
assert actual_system.equations == expected_system.equations
except AssertionError as e:
if actual_system.is_polynomial():
raise AssertionError("Systems are not equal but actual system is polynomial.")
else:
raise e
def test_already_polynomial():
system = EquationSystem([
sp.Eq(dot_x, x + x ** 2 + 3)
])
assert polynomialize(system).equations == system.equations
def test_sigmoid_diff():
system = EquationSystem([
sp.Eq(dot_x, 1 / (1 + sp.exp(x)))
])
poly_system = polynomialize(system)
_, y0, y1 = poly_system.variables.free
dot_y0, dot_y1 = derivatives([y0, y1])
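    # One consistent choice of new variables (assumed to match what polynomialize picks):
    # with y0 = exp(x) and y1 = 1/(1 + exp(x)), the chain rule gives
    #   y0' = exp(x) * x' = y0*y1   and   y1' = -exp(x) * x' / (1 + exp(x))**2 = -y0*y1**3,
    # which is exactly the polynomial system asserted below.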
expected_system = EquationSystem([
sp.Eq(dot_x, y1),
sp.Eq(dot_y0, y0 * y1),
sp.Eq(dot_y1, -y0 * y1 ** 3)
])
assert_check_poly(expected_system, poly_system)
def test_parameter():
k = sp.Symbol('k')
system = EquationSystem([
sp.Eq(dot_x, sp.exp(k * x)),
sp.Eq(dot_y, sp.exp(k * x))
], parameter_variables=[k])
poly_system = polynomialize(system, new_var_name="w", start_new_vars_with=0)
w0 = sp.Symbol("w{0}")
dw0 = derivatives("w{0}")
expected_system = EquationSystem([
sp.Eq(dot_x, w0),
sp.Eq(dot_y, w0),
sp.Eq(dw0, k * w0 ** 2)
], parameter_variables=[k])
assert_check_poly(expected_system, poly_system)
# the-stack_0_17883
import torch
from package.util import util_mixins
class SamplingResult(util_mixins.NiceRepr):
"""
Example:
>>> # xdoctest: +IGNORE_WANT
>>> self = SamplingResult.random(rng=10)
>>> print('self = {}'.format(self))
self = <SamplingResult({
'neg_bboxes': torch.Size([12, 4]),
'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
'num_gts': 4,
'pos_assigned_gt_inds': tensor([], dtype=torch.int64),
'pos_bboxes': torch.Size([0, 4]),
'pos_inds': tensor([], dtype=torch.int64),
'pos_is_gt': tensor([], dtype=torch.uint8)
})>
"""
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
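        # assign_result.gt_inds appears to use 1-based ground-truth indices (0 = unassigned),
        # so subtract 1 to obtain 0-based indices into gt_bboxes.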
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_bboxes.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
else:
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, 4)
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
return torch.cat([self.pos_bboxes, self.neg_bboxes])
def to(self, device):
"""
Change the device of the data inplace.
Example:
>>> self = SamplingResult.random()
>>> print('self = {}'.format(self.to(None)))
>>> # xdoctest: +REQUIRES(--gpu)
>>> print('self = {}'.format(self.to(0)))
"""
_dict = self.__dict__
for key, value in _dict.items():
if isinstance(value, torch.Tensor):
_dict[key] = value.to(device)
return self
def __nice__(self):
data = self.info.copy()
data['pos_bboxes'] = data.pop('pos_bboxes').shape
data['neg_bboxes'] = data.pop('neg_bboxes').shape
parts = ['\'{}\': {!r}'.format(k, v) for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self):
"""
Returns a dictionary of info about the object
"""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_bboxes': self.pos_bboxes,
'neg_bboxes': self.neg_bboxes,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}
# the-stack_0_17884
#!/usr/bin/env python
import os
import cffi
if __name__ == "__main__":
ffi = cffi.FFI()
with open(os.path.join(os.path.dirname(__file__), "Point.h")) as f:
ffi.cdef(f.read())
ffi.set_source("_point",
# Since we are calling a fully built library directly no custom source
# is necessary. We need to include the .h files, though, because behind
# the scenes cffi generates a .c file which contains a Python-friendly
# wrapper around each of the functions.
'#include "Point.h"',
# The important thing is to include the pre-built lib in the list of
# libraries we are linking against:
libraries=["point"],
library_dirs=[os.path.dirname(__file__),],
)
ffi.compile()
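# After running this build script, the generated extension is used in the usual cffi way
# (sketch; the actual callable names depend on what Point.h declares):
#   from _point import ffi, lib
#   # lib.<function_from_Point_h>(...) dispatches straight into the pre-built libpoint.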
# the-stack_0_17885
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable
import matplotlib.pyplot as plt
# y = x^4 - 2x^2 optimization with newton's method
def f(x):
y = x**4 - 2 * x**2
return y
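# Analytic second derivative of f: since f(x) = x^4 - 2x^2, f'(x) = 4x^3 - 4x and
# f''(x) = 12x^2 - 4. Newton's update below is x <- x - f'(x)/f''(x), with f'(x) taken
# from autodiff (x.grad) and f''(x) from this closed form.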
def gx2(x):
return 12 * x**2 - 4
if __name__ == '__main__':
# gradient descent vs newton
x = Variable(np.array(2.0))
iters = 10
newton_x = []
newton_y = []
for i in range(iters):
if not isinstance(x.data, np.ndarray):
newton_x.append(np.array(x.data))
else:
newton_x.append(x.data)
y = f(x)
newton_y.append(y.data)
x.cleargrad()
y.backward()
x.data = x.data - x.grad / gx2(x.data)
print(x.data, y.data)
x = Variable(np.array(2.0))
iters = 400
lr = 0.001
grad_descent_x = []
grad_descent_y = []
for i in range(iters):
y = f(x)
grad_descent_x.append(x.data)
grad_descent_y.append(y.data)
x.cleargrad()
y.backward()
x.data = x.data - lr * x.grad
x = np.arange(-1.5, 2.2, 0.1)
y = f(x)
plt.scatter(grad_descent_x, grad_descent_y, label='Gradient Descent')
plt.scatter(newton_x, newton_y, marker='*', color='violet', label='Newton\'s method')
plt.plot(x, y, color='blue')
plt.legend()
title = r"f(x) = $y=x^4-2x^2$"
plt.title(title)
plt.show()
# the-stack_0_17887
import FWCore.ParameterSet.Config as cms
process = cms.Process("DQM")
# DQM service
process.load("DQMServices.Core.DQMStore_cfi")
# MessageLogger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
# Global Tag
from Configuration.AlCa.GlobalTag import GlobalTag as customiseGlobalTag
process.GlobalTag = customiseGlobalTag(globaltag = '80X_dataRun2_HLT_v12')
process.GlobalTag.connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS')
# Source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Run2016B/HLTPhysics2/RAW/v1/000/272/022/00000/4CE23DEB-CB0D-E611-A6AC-02163E01181C.root'
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1000) )
# unpack L1 digis
process.load("EventFilter.L1TRawToDigi.gtStage2Digis_cfi")
process.gtStage2Digis.InputLabel = cms.InputTag( "rawDataCollector" )
process.load("DQM.HLTEvF.triggerBxMonitor_cfi")
process.triggerBxMonitor.l1tResults = cms.untracked.InputTag('gtStage2Digis')
process.triggerBxMonitor.hltResults = cms.untracked.InputTag('TriggerResults', '', 'HLT')
process.load('DQMServices.Components.DQMFileSaver_cfi')
process.dqmSaver.workflow = "/HLT/TriggerBxMonitor/All"
process.endp = cms.EndPath( process.gtStage2Digis + process.triggerBxMonitor + process.dqmSaver )
# the-stack_0_17888
#!/usr/bin/env python
#
# Use the raw transactions API to spend eacoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a eacoind or EACoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the EACoin Core data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/EACoinCore/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "EACoinCore")
return os.path.expanduser("~/.eacoin")
def read_bitcoin_config(dbdir):
"""Read the eacoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
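    # ConfigParser requires section headers, but bitcoin-style .conf files have none, so
    # FakeSecHead wraps the file and prepends a dummy '[all]' section (it also strips
    # trailing '#' comments from each line).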
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "eacoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a EACoin Core JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19998 if testnet else 9998
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the eacoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(eacoind):
info = eacoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
eacoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = eacoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(eacoind):
address_summary = dict()
address_to_account = dict()
for info in eacoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = eacoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = eacoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-eacoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(eacoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(eacoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to eacoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = eacoind.createrawtransaction(inputs, outputs)
signed_rawtx = eacoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(eacoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = eacoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(eacoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = eacoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(eacoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get eacoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send eacoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of eacoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
eacoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(eacoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(eacoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(eacoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(eacoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = eacoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
# the-stack_0_17889
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
General Asset Bundler Batch Tests
"""
# Import builtin libraries
import pytest
import logging
import os
import subprocess
import re
from typing import List, Optional, Dict
import time
# Import LyTestTools
import ly_test_tools.builtin.helpers as helpers
import ly_test_tools.environment.file_system as fs
import ly_test_tools.environment.waiter as waiter
from ..ap_fixtures.ap_setup_fixture import ap_setup_fixture as ap_setup_fixture
from ..ap_fixtures.asset_processor_fixture import asset_processor
from ..ap_fixtures.timeout_option_fixture import timeout_option_fixture as timeout
# fmt:off
from ..ap_fixtures.bundler_batch_setup_fixture \
import bundler_batch_setup_fixture as bundler_batch_helper
# fmt:on
from ..ap_fixtures.ap_config_backup_fixture import ap_config_backup_fixture as config_backup
# Import LyShared
import ly_test_tools.o3de.pipeline_utils as utils
from ly_test_tools.o3de.asset_processor import ASSET_PROCESSOR_PLATFORM_MAP
win_and_mac_platforms = [ASSET_PROCESSOR_PLATFORM_MAP['windows'],
ASSET_PROCESSOR_PLATFORM_MAP['mac']]
# Just some platforms for filename computation (doesn't matter which)
platforms = {}
for key, value in ASSET_PROCESSOR_PLATFORM_MAP.items():
platforms[value] = key
# Use the following logging pattern to hook all test logging together:
logger = logging.getLogger(__name__)
# Configuring the logging is done in ly_test_tools at the following location:
# ~/dev/Tools/LyTestTools/ly_test_tools/log/py_logging_util.py
# Helper: variables we will use for parameter values in the test:
targetProjects = ["AutomatedTesting"]
@pytest.fixture
def local_resources(request, workspace, ap_setup_fixture):
# Test-level asset folder. Directory contains a subfolder for each test (i.e. C1234567)
ap_setup_fixture["tests_dir"] = os.path.dirname(os.path.realpath(__file__))
@pytest.mark.usefixtures("local_resources")
@pytest.mark.parametrize("project", targetProjects)
@pytest.mark.assetpipeline
@pytest.mark.SUITE_periodic
class TestsAssetBundlerBatch_WindowsAndMac(object):
"""
Asset Bundler Batch Tests for all platforms
"""
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877174")
@pytest.mark.test_case_id("C16877175")
@pytest.mark.test_case_id("C16877178")
@pytest.mark.test_case_id("C16877177")
def test_WindowsAndMac_RunHelpCmd_ZeroExitCode(self, workspace, bundler_batch_helper):
"""
        Simple calls to all AssetBundlerBatch --help commands to make sure non-zero exit codes are not returned.
        Test will call each Asset Bundler Batch sub-command with --help and will error on a non-0 exit code.
"""
bundler_batch_helper.call_bundlerbatch(help="")
bundler_batch_helper.call_seeds(help="")
bundler_batch_helper.call_assetLists(help="")
bundler_batch_helper.call_comparisonRules(help="")
bundler_batch_helper.call_compare(help="")
bundler_batch_helper.call_bundleSettings(help="")
bundler_batch_helper.call_bundles(help="")
bundler_batch_helper.call_bundleSeed(help="")
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877175")
@pytest.mark.skip("'animations/animationeditorfiles/sample1.animgraph' missing, needs investigation")
def test_WindowsAndMac_CreateAssetList_DependenciesCorrect(self, workspace, bundler_batch_helper):
r"""
Tests that an asset list created maps dependencies correctly.
testdependencieslevel\level.pak and lists of known dependencies are used for validation
Test Steps:
1. Create an asset list from the level.pak
2. Create Lists of expected assets in the level.pak
3. Add lists of expected assets to a single list
4. Compare list of expected assets to actual assets
"""
helper = bundler_batch_helper
# Create the asset list file
helper.call_assetLists(
addSeed=r"levels\testdependencieslevel\level.pak",
assetListFile=helper['asset_info_file_request']
)
assert os.path.isfile(helper["asset_info_file_result"])
# Lists of known relative locations of assets
default_level_assets = [
"engineassets/texturemsg/defaultnouvs.dds",
"engineassets/texturemsg/defaultnouvs.dds.1",
"engineassets/texturemsg/defaultnouvs.dds.2",
"engineassets/texturemsg/defaultnouvs.dds.3",
"engineassets/texturemsg/defaultnouvs.dds.4",
"engineassets/texturemsg/defaultnouvs.dds.5",
"engineassets/texturemsg/defaultnouvs.dds.6",
"engineassets/texturemsg/defaultnouvs.dds.7",
"engineassets/texturemsg/defaultnouvs_ddn.dds",
"engineassets/texturemsg/defaultnouvs_ddn.dds.1",
"engineassets/texturemsg/defaultnouvs_ddn.dds.2",
"engineassets/texturemsg/defaultnouvs_ddn.dds.3",
"engineassets/texturemsg/defaultnouvs_ddn.dds.4",
"engineassets/texturemsg/defaultnouvs_ddn.dds.5",
"engineassets/texturemsg/defaultnouvs_spec.dds",
"engineassets/texturemsg/defaultnouvs_spec.dds.1",
"engineassets/texturemsg/defaultnouvs_spec.dds.2",
"engineassets/texturemsg/defaultnouvs_spec.dds.3",
"engineassets/texturemsg/defaultnouvs_spec.dds.4",
"engineassets/texturemsg/defaultnouvs_spec.dds.5",
"engineassets/textures/defaults/16_grey.dds",
"engineassets/textures/cubemap/default_level_cubemap.dds",
"engineassets/textures/cubemap/default_level_cubemap.dds.1",
"engineassets/textures/cubemap/default_level_cubemap.dds.2",
"engineassets/textures/cubemap/default_level_cubemap.dds.3",
"engineassets/textures/cubemap/default_level_cubemap.dds.4",
"engineassets/textures/cubemap/default_level_cubemap_diff.dds",
"engineassets/materials/water/ocean_default.mtl",
"engineassets/textures/defaults/spot_default.dds",
"engineassets/textures/defaults/spot_default.dds.1",
"engineassets/textures/defaults/spot_default.dds.2",
"engineassets/textures/defaults/spot_default.dds.3",
"engineassets/textures/defaults/spot_default.dds.4",
"engineassets/textures/defaults/spot_default.dds.5",
"materials/material_terrain_default.mtl",
"textures/skys/night/half_moon.dds",
"textures/skys/night/half_moon.dds.1",
"textures/skys/night/half_moon.dds.2",
"textures/skys/night/half_moon.dds.3",
"textures/skys/night/half_moon.dds.4",
"textures/skys/night/half_moon.dds.5",
"textures/skys/night/half_moon.dds.6",
"engineassets/materials/sky/sky.mtl",
"levels/testdependencieslevel/level.pak",
"levels/testdependencieslevel/terrain/cover.ctc",
"levels/testdependencieslevel/terraintexture.pak",
]
sequence_material_cube_assets = [
"textures/test_texture_sequence/test_texture_sequence000.dds",
"textures/test_texture_sequence/test_texture_sequence001.dds",
"textures/test_texture_sequence/test_texture_sequence002.dds",
"textures/test_texture_sequence/test_texture_sequence003.dds",
"textures/test_texture_sequence/test_texture_sequence004.dds",
"textures/test_texture_sequence/test_texture_sequence005.dds",
"objects/_primitives/_box_1x1.cgf",
"materials/test_texture_sequence.mtl",
"objects/_primitives/_box_1x1.mtl",
"textures/_primitives/middle_gray_checker.dds",
"textures/_primitives/middle_gray_checker.dds.1",
"textures/_primitives/middle_gray_checker.dds.2",
"textures/_primitives/middle_gray_checker.dds.3",
"textures/_primitives/middle_gray_checker.dds.4",
"textures/_primitives/middle_gray_checker.dds.5",
"textures/_primitives/middle_gray_checker_ddn.dds",
"textures/_primitives/middle_gray_checker_ddn.dds.1",
"textures/_primitives/middle_gray_checker_ddn.dds.2",
"textures/_primitives/middle_gray_checker_ddn.dds.3",
"textures/_primitives/middle_gray_checker_ddn.dds.4",
"textures/_primitives/middle_gray_checker_ddn.dds.5",
"textures/_primitives/middle_gray_checker_spec.dds",
"textures/_primitives/middle_gray_checker_spec.dds.1",
"textures/_primitives/middle_gray_checker_spec.dds.2",
"textures/_primitives/middle_gray_checker_spec.dds.3",
"textures/_primitives/middle_gray_checker_spec.dds.4",
"textures/_primitives/middle_gray_checker_spec.dds.5",
]
character_with_simplified_material_assets = [
"objects/characters/jack/jack.actor",
"objects/characters/jack/jack.mtl",
"objects/characters/jack/textures/jack_diff.dds",
"objects/characters/jack/textures/jack_diff.dds.1",
"objects/characters/jack/textures/jack_diff.dds.2",
"objects/characters/jack/textures/jack_diff.dds.3",
"objects/characters/jack/textures/jack_diff.dds.4",
"objects/characters/jack/textures/jack_diff.dds.5",
"objects/characters/jack/textures/jack_diff.dds.6",
"objects/characters/jack/textures/jack_diff.dds.7",
"objects/characters/jack/textures/jack_spec.dds",
"objects/characters/jack/textures/jack_spec.dds.1",
"objects/characters/jack/textures/jack_spec.dds.2",
"objects/characters/jack/textures/jack_spec.dds.3",
"objects/characters/jack/textures/jack_spec.dds.4",
"objects/characters/jack/textures/jack_spec.dds.5",
"objects/characters/jack/textures/jack_spec.dds.6",
"objects/characters/jack/textures/jack_spec.dds.7",
"objects/default/editorprimitive.mtl",
"engineassets/textures/grey.dds",
"animations/animationeditorfiles/sample0.animgraph",
"animations/motions/jack_death_fall_back_zup.motion",
"animations/animationeditorfiles/sample1.animgraph",
"animations/animationeditorfiles/sample0.motionset",
"animations/motions/rin_jump.motion",
"animations/animationeditorfiles/sample1.motionset",
"animations/motions/rin_idle.motion",
"animations/motions/jack_idle_aim_zup.motion",
]
spawner_assets = [
"slices/sphere.dynamicslice",
"objects/default/primitive_sphere.cgf",
"test1.luac",
"test2.luac",
]
ui_canvas_assets = [
"fonts/vera.ttf",
"fonts/vera.font",
"scriptcanvas/mainmenu.scriptcanvas_compiled",
"fonts/vera.fontfamily",
"ui/canvas/start.uicanvas",
"fonts/vera-italic.font",
"ui/textureatlas/sample.texatlasidx",
"fonts/vera-bold-italic.ttf",
"fonts/vera-bold.font",
"ui/textures/prefab/button_normal.dds",
"ui/textures/prefab/button_normal.sprite",
"fonts/vera-italic.ttf",
"ui/textureatlas/sample.dds",
"fonts/vera-bold-italic.font",
"fonts/vera-bold.ttf",
"ui/textures/prefab/button_disabled.dds",
"ui/textures/prefab/button_disabled.sprite",
]
wwise_and_atl_assets = [
"libs/gameaudio/wwise/levels/testdependencieslevel/test_dependencies_level.xml",
"sounds/wwise/test_bank3.bnk",
"sounds/wwise/test_bank4.bnk",
"sounds/wwise/test_bank5.bnk",
"sounds/wwise/test_bank1.bnk",
"sounds/wwise/init.bnk",
"sounds/wwise/499820003.wem",
"sounds/wwise/196049145.wem",
]
particle_library_assets = [
"libs/particles/milestone2particles.xml",
"textures/milestone2/particles/fx_launchermuzzlering_01.dds",
"textures/milestone2/particles/fx_launchermuzzlering_01.dds.1",
"textures/milestone2/particles/fx_launchermuzzlering_01.dds.2",
"textures/milestone2/particles/fx_launchermuzzlering_01.dds.3",
"textures/milestone2/particles/fx_launchermuzzlering_01.dds.4",
"textures/milestone2/particles/fx_launchermuzzlering_01.dds.5",
"textures/milestone2/particles/fx_sparkstreak_01.dds",
"textures/milestone2/particles/fx_launchermuzzlefront_01.dds",
"textures/milestone2/particles/fx_launchermuzzlefront_01.dds.1",
"textures/milestone2/particles/fx_launchermuzzlefront_01.dds.2",
"textures/milestone2/particles/fx_launchermuzzlefront_01.dds.3",
"textures/milestone2/particles/fx_launchermuzzlefront_01.dds.4",
"textures/milestone2/particles/fx_launchermuzzlefront_01.dds.5",
]
lens_flares_library_assets = ["libs/flares/flares.xml", "textures/lights/flare01.dds"]
expected_assets_list = default_level_assets
expected_assets_list.extend(sequence_material_cube_assets)
expected_assets_list.extend(character_with_simplified_material_assets)
expected_assets_list.extend(spawner_assets)
expected_assets_list.extend(ui_canvas_assets)
expected_assets_list.extend(wwise_and_atl_assets)
expected_assets_list.extend(particle_library_assets)
expected_assets_list.extend(lens_flares_library_assets) # All expected assets
# Get actual calculated dependencies from the asset list created
actual_assets_list = []
for rel_path in helper.get_asset_relative_paths(helper["asset_info_file_result"]):
actual_assets_list.append(rel_path)
assert sorted(actual_assets_list) == sorted(expected_assets_list)
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877175")
def test_WindowsAndMac_GenerateDebugInfo_DoesNotEffectOutputFile(self, workspace, bundler_batch_helper):
"""
Validates destructive overwriting for asset lists and
that generating debug information does not affect asset list creation.
Test Steps:
1. Create an asset list from seed_list
2. Validate asset list was created
3. Read and store contents of asset list into memory
4. Attempt to create a new asset list without using --allowOverwrites
5. Verify that Asset Bundler returns False
6. Verify that file contents of the originally created asset list did not change from what was stored in memory
7. Attempt to create a new asset list without debug info while allowing overwrites
8. Verify that file contents of the originally created asset list changed from what was stored in memory
"""
helper = bundler_batch_helper
seed_list = os.path.join(workspace.paths.engine_root(), "Assets", "Engine", "SeedAssetList.seed") # Engine seed list
asset = r"levels\testdependencieslevel\level.pak"
# Create Asset list
helper.call_assetLists(
seedListFile=seed_list,
assetListFile=helper['asset_info_file_request'],
)
# Validate file was created
assert os.path.isfile(helper["asset_info_file_result"])
# Read asset list contents to compare before and after destructive overwrite
with open(helper["asset_info_file_result"], "r") as asset_list_file:
file_contents = asset_list_file.read()
# Make sure destructive overwrite will fail without --allowOverwrites
# Try overwriting the existing file without --allowOverwrites (should fail)
result, _ = helper.call_assetLists(seedListFile=seed_list, addSeed=asset,
assetListFile=helper["asset_info_file_request"])
assert result is False, "Destructive overwrite without override DID NOT fail"
# Make sure file contents DID NOT change
# fmt:off
with open(helper["asset_info_file_result"], "r") as asset_list_file:
assert file_contents == asset_list_file.read(), \
"File was changed even though the Destructive overwrite failed without override."
# fmt:on
# Create the asset list file without generating debug info (and overwriting existing file)
helper.call_assetLists(
addSeed=asset,
assetListFile=helper["asset_info_file_request"],
allowOverwrites=""
)
# Make sure file contents DID change
# fmt:off
with open(helper["asset_info_file_result"], "r") as asset_list_file:
assert file_contents != asset_list_file.read(), \
"File was NOT changed even though the Destructive overwrite was allowed."
# fmt:on
# Get list of all files (relative paths) in generated asset list (no debug file created)
assets_generated_without_debug_info = []
for rel_path in helper.get_asset_relative_paths(helper["asset_info_file_result"]):
assets_generated_without_debug_info.append(rel_path)
# Create the asset list file while also generating debug info
helper.call_assetLists(
addSeed=asset,
allowOverwrites="",
generateDebugFile="",
assetListFile=helper["asset_info_file_request"]
)
assert os.path.isfile(helper["asset_info_file_result"])
# Get list of all files (relative paths) in generated asset list (debug file created)
assets_generated_with_debug_info = []
for rel_path in helper.get_asset_relative_paths(helper["asset_info_file_result"]):
assets_generated_with_debug_info.append(rel_path)
# Compare assets from asset lists
assert sorted(assets_generated_without_debug_info) == sorted(assets_generated_with_debug_info)
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877175")
@pytest.mark.test_case_id("C16877177")
def test_WindowsAndMac_BundlesAndBundleSettings_EquivalentOutput(self, workspace, bundler_batch_helper):
"""
Validates bundle creation both through the 'bundles' and 'bundlesettings'
subcommands.
Test Steps:
1. Create an asset list
2. Create a bundle with the asset list and without a bundle settings file
3. Create a bundle with the asset list and a bundle settings file
4. Validate calling bundle doesn't perform destructive overwrite without --allowOverwrites
5. Validate that calling bundle again with --allowOverwrites performs a destructive overwrite
6. Validate contents of original bundle and overwritten bundle
"""
helper = bundler_batch_helper
seed_list = os.path.join(workspace.paths.engine_root(), "Assets", "Engine", "SeedAssetList.seed") # Engine seed list
asset = r"levels\testdependencieslevel\level.pak"
# Useful bundle locations / names (2 for comparing contents)
# fmt:off
platform_bundle_file_1 = os.path.join(
helper["test_dir"],
helper.platform_file_name(helper["bundle_file_name"], workspace.asset_processor_platform))
second_bundle_file_request = os.path.join(helper["test_dir"], "bundle_2.pak")
platform_bundle_file_2 = os.path.join(
helper["test_dir"], helper.platform_file_name("bundle_2.pak", workspace.asset_processor_platform))
# fmt:on
# Extraction directories
extract_dir_1 = os.path.join(helper["test_dir"], "ExtractDir1")
extract_dir_2 = os.path.join(helper["test_dir"], "ExtractDir2")
# Create asset list to test bundles on
helper.call_assetLists(
addSeed=asset,
seedListFile=seed_list,
assetListFile=helper["asset_info_file_request"],
)
# Make a bundle using 'bundles' command
helper.call_bundles(
assetListFile=helper["asset_info_file_result"],
outputBundlePath=helper["bundle_file"],
maxSize=helper["max_bundle_size_in_mib"],
)
# Make a bundle setting using the 'bundleSettings' command
helper.call_bundleSettings(
bundleSettingsFile=helper["bundle_settings_file_request"],
assetListFile=helper["asset_info_file_result"],
outputBundlePath=second_bundle_file_request,
maxSize=helper["max_bundle_size_in_mib"],
)
# Recreate bundle via bundle settings this time
helper.call_bundles(bundleSettingsFile=helper["bundle_settings_file_result"])
# Make sure destructive overwrite fails without --allowOverwrites
result, _ = helper.call_bundles(bundleSettingsFile=helper["bundle_settings_file_result"])
assert result is False, "bundles call DID NOT fail when overwriting without --allowOverrides"
# Run again to check overriding (this time it should work)
helper.call_bundles(
bundleSettingsFile=helper["bundle_settings_file_result"],
allowOverwrites="",
)
# Validate results
helper.extract_and_check(extract_dir_1, platform_bundle_file_1)
helper.extract_and_check(extract_dir_2, platform_bundle_file_2)
# Get files from extracted directories
extracted_files_1 = utils.get_relative_file_paths(extract_dir_1)
extracted_files_2 = utils.get_relative_file_paths(extract_dir_2)
assert sorted(extracted_files_1) == sorted(extracted_files_2), "The two extracted bundles do not match"
# Use CRC checksums to make sure archives' files match
crc_1 = helper.get_crc_of_files_in_archive(platform_bundle_file_1)
crc_2 = helper.get_crc_of_files_in_archive(platform_bundle_file_2)
del crc_1["manifest.xml"] # Remove manifest files from comparisons
del crc_2["manifest.xml"] # it is expected that they may differ
assert crc_1 == crc_2, "Extracted files from the two different bundles did not match"
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877175")
@pytest.mark.test_case_id("C16877177")
def test_WindowsAndMac_CreateMultiPlatformBundles_ValidContents(self, workspace, bundler_batch_helper):
"""
Creates bundles using the same asset list and compares that they are created equally. Also
validates that platform bundles exclude/include an expected file. (excluded for WIN, included for MAC)
Test Steps:
1. Create an asset list
2. Create bundles for both PC & Mac
3. Validate that bundles were created
4. Verify that expected missing file is not in windows bundle
5. Verify that expected file is in the mac bundle
6. Create duplicate bundles with allowOverwrites
7. Verify that files were generated
8. Verify original bundle checksums are equal to new bundle checksums
"""
helper = bundler_batch_helper
# fmt:off
assert "pc" in helper["platforms"] and "mac" in helper["platforms"], \
"This test requires both PC and MAC platforms to be enabled. " \
"Please rerun with commandline option: '--bundle_platforms=pc,mac'"
# fmt:on
seed_list = os.path.join(workspace.paths.engine_root(), "Assets", "Engine", "SeedAssetList.seed") # Engine seed list
# Useful bundle / asset list locations
bundle_dir = os.path.dirname(helper["bundle_file"])
bundle_files = {}
duplicate_bundle_files = {}
for platform in helper["platforms"]:
bundle_files[platform] = os.path.join(
bundle_dir,
helper.platform_file_name("bundle.pak", platforms[platform])
)
duplicate_bundle_files[platform] = os.path.join(
bundle_dir,
helper.platform_file_name("duplicateBundle.pak", platforms[platform])
)
duplicate_asset_info_file_request = os.path.join(helper["test_dir"], "duplicateAssetFileInfo.assetlist")
duplicate_bundle_file = os.path.join(helper["test_dir"], "duplicateBundle.pak")
# Create an asset list to work with
helper.call_assetLists(
seedListFile=seed_list,
assetListFile=helper["asset_info_file_request"],
platform=helper["platforms_as_string"],
)
# Create bundles for both pc and mac
helper.call_bundles(
assetListFile=helper["asset_info_file_request"],
outputBundlePath=helper["bundle_file"],
platform=helper["platforms_as_string"],
)
# Ensure that the bundles were created
for bundle_file in bundle_files.values():
assert os.path.isfile(bundle_file)
# This asset is created both on mac and windows platform
file_to_check = b"engineassets/slices/defaultlevelsetup.slice" # [use byte str because file is in binary]
# Extract the delta catalog file from pc archive. {file_to_check} SHOULD NOT be present for PC
file_contents = helper.extract_file_content(bundle_files["pc"], "DeltaCatalog.xml")
# fmt:off
assert file_contents.find(file_to_check) == -1, \
f"{file_to_check} was found in DeltaCatalog.xml in pc bundle file {bundle_files['pc']}"
# fmt:on
# Extract the delta catalog file from mac archive. {file_to_check} SHOULD be present for MAC
file_contents = helper.extract_file_content(bundle_files["mac"], "DeltaCatalog.xml")
# fmt:off
assert file_contents.find(file_to_check) != -1, \
f"{file_to_check} was not found in DeltaCatalog.xml in darwin bundle file {bundle_files['mac']}"
# fmt:on
# Gather checksums for first set of bundles
check_sums_before = {}
for platform in helper["platforms"]:
check_sums_before[platform] = helper.get_crc_of_files_in_archive(bundle_files[platform])
# Create duplicate asset list
helper.call_assetLists(
seedListFile=seed_list,
assetListFile=duplicate_asset_info_file_request,
platform=helper["platforms_as_string"],
allowOverwrites="",
)
# Create duplicate bundles
helper.call_bundles(
assetListFile=f"{helper['asset_info_file_request']},{duplicate_asset_info_file_request}",
outputBundlePath=f"{helper['bundle_file']},{duplicate_bundle_file}",
platform=helper["platforms_as_string"],
allowOverwrites="",
)
# Make sure all files were created as expected
for platform in helper["platforms"]:
assert os.path.isfile(bundle_files[platform]), f"File: {bundle_files[platform]} was not created"
# fmt:off
assert os.path.isfile(duplicate_bundle_files[platform]), \
f"File: {duplicate_bundle_files[platform]} was not created"
# fmt:on
# Get original bundles' contents again
check_sums_after = {}
check_sums_duplicates = {}
for platform in helper["platforms"]:
check_sums_after[platform] = helper.get_crc_of_files_in_archive(bundle_files[platform])
check_sums_duplicates[platform] = helper.get_crc_of_files_in_archive(duplicate_bundle_files[platform])
# Make sure original bundles' contents did not change during operation
for platform in helper["platforms"]:
# fmt:off
assert check_sums_before[platform] == check_sums_after[platform], \
f"Before and after check sum for {platform} did not match"
assert check_sums_before[platform] == check_sums_duplicates[platform], \
f"Before and duplicated check sum for {platform} did not match"
# fmt:on
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877174")
def test_WindowsAndMac_AddAndRemoveSeedPlatform_Success(self, workspace, bundler_batch_helper):
"""
Validates that the 'seeds' subcommand can add and remove seeds and seed platforms properly.
Also checks that destructive overwrites require the --allowOverwrites flag
Test Steps:
1. Create a PC Seed List from a test asset
2. Validate that seed list was generated with proper platform flag
3. Add Mac & PC as platforms to the seed list
4. Verify that seed has both Mac & PC platform flags
5. Remove Mac as a platform from the seed list
6. Verify that seed only has PC as a platform flag
7. Attempt to add a platform without using the --platform argument
8. Verify that asset bundler returns False and file contents did not change
9. Add Mac platform via --addPlatformToSeeds
10. Validate that seed has both Mac & PC platform flags
11. Attempt to remove platform without specifying a platform
12. Validate that seed has both Mac & PC platform flags
13. Validate that seed list contents did not change
14. Remove seed
15. Validate that seed was removed from the seed list
"""
helper = bundler_batch_helper
# Make sure asset list file does not exist entering the test
if os.path.exists(helper["asset_info_file_request"]):
fs.delete([helper["asset_info_file_request"]], True, True)
test_asset = r"fonts/open_sans/license.txt"
re_pattern = re.compile(r"""field="platformFlags" value="(\d+)" """)
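# The platformFlags value written to the seed file is the combination (sum) of the
# per-platform values in helper["platform_values"], so a seed targeting pc and mac
# carries both values added together.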
all_lines = ""
def check_seed_platform(seed_file: str, asset: str, expected_platform_flag: int) -> str:
"""Helper function to check a seed's platform flag. Returns the contents of the seed_file"""
with open(seed_file, "r") as seed_list_file:
lines = seed_list_file.read()
assert asset in lines, f"{asset} was not added to asset list file {seed_file}"
re_match = re_pattern.search(lines)
assert re_match, f"PlatformFlags were not found in seed file {seed_file}"
platform_flag = int(re_match.group(1))
# fmt:off
assert platform_flag == expected_platform_flag, \
f"Expected platform flag to be {expected_platform_flag}. Actual {platform_flag}"
# fmt:on
return lines
# End check_seed_platform()
# Create seed list file
helper.call_seeds(
seedListFile=helper["seed_list_file"],
addSeed=test_asset,
platform="pc",
)
# Validate file exists and has proper platform flag
assert os.path.exists(helper["seed_list_file"]), f"seed list file was not created at {helper['seed_list_file']}"
check_seed_platform(helper["seed_list_file"], test_asset, helper["platform_values"]["pc"])
# Add Mac and pc as platform for seed
helper.call_seeds(
seedListFile=helper["seed_list_file"],
addSeed=test_asset,
platform="pc,mac",
)
# Validate both mac and pc are activated for seed
# fmt:off
check_seed_platform(helper["seed_list_file"], test_asset,
helper["platform_values"]["pc"] + helper["platform_values"]["mac"])
# fmt:on
# Remove MAC platform
helper.call_seeds(
seedListFile=helper["seed_list_file"],
removePlatformFromSeeds="",
platform="mac",
)
# Validate only pc platform for seed. Save file contents to variable
all_lines = check_seed_platform(helper["seed_list_file"], test_asset, helper["platform_values"]["pc"])
result, _ = helper.call_seeds(seedListFile=helper["seed_list_file"], addPlatformToSeeds="", )
assert result is False, "Calling --addPlatformToSeeds did not fail when not specifying the --platform argument"
# Make sure the file contents did not change
# fmt:off
with open(helper["seed_list_file"], "r") as asset_list_file:
assert all_lines == asset_list_file.read(), \
"Calling --addPlatformToSeeds without --platform failed but changed the seed file"
# fmt:on
# Add MAC platform via --addPlatformToSeeds
helper.call_seeds(
seedListFile=helper["seed_list_file"],
addPlatformToSeeds="",
platform="mac",
)
# Validate Mac platform was added back on. Save file contents
# fmt:off
all_lines = check_seed_platform(helper["seed_list_file"], test_asset,
helper["platform_values"]["pc"] + helper["platform_values"]["mac"])
# fmt:on
# Try to remove platform without specifying a platform to remove (should fail)
result, _ = helper.call_seeds(seedListFile=helper["seed_list_file"], removePlatformFromSeeds="", )
assert result is False, "Calling --removePlatformFromSeeds did not fail when not specifying the --platform argument"
# Make sure file contents did not change
# fmt:off
with open(helper["seed_list_file"], "r") as asset_list_file:
assert all_lines == asset_list_file.read(), \
"Calling --removePlatformFromSeeds without --platform failed but changed the seed file"
# fmt:on
# Remove the seed
helper.call_seeds(
seedListFile=helper["seed_list_file"],
removeSeed=test_asset,
platform="pc,mac",
)
# Validate seed was removed from file
# fmt:off
with open(helper["seed_list_file"], "r") as seed_list_file:
assert test_asset not in seed_list_file.read(), \
f"Seed was not removed from asset list file {helper['seed_list_file']}"
# fmt:on
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.SUITE_sandbox
@pytest.mark.test_case_id("C16877174")
@pytest.mark.test_case_id("C16877175")
@pytest.mark.test_case_id("C16877178")
# fmt:off
def test_WindowsAndMac_ComparisonOperations_Success(self, workspace, bundler_batch_helper, ap_setup_fixture,
asset_processor, timeout):
# fmt:on
"""
Tests asset list comparison, both by file and by comparison type. Uses a set
of controlled test assets to compare resulting output asset lists
1. Create comparison rules files
2. Create seed files for different sets of test assets
3. Create assetlist files for seed files
4. Validate assetlists were created properly
5. Compare using comparison rules files and just command line arguments
"""
helper = bundler_batch_helper
env = ap_setup_fixture
# fmt:off
assert "pc" in helper["platforms"] and "mac" in helper["platforms"], \
"This test requires both PC and MAC platforms to be enabled. " \
"Please rerun with commandline option: '--bundle_platforms=pc,mac'"
# fmt:on
# Test assets arranged in common lists: six (0-5) .txt files and .dat files
even_txt = ["txtfile_0.txt", "txtfile_2.txt", "txtfile_4.txt"]
odd_txt = ["txtfile_1.txt", "txtfile_3.txt", "txtfile_5.txt"]
even_dat = ["datfile_0.dat", "datfile_2.dat", "datfile_4.dat"]
odd_dat = ["datfile_1.dat", "datfile_3.dat", "datfile_5.dat"]
even_assets = even_txt + even_dat
odd_assets = odd_txt + odd_dat
all_txt = even_txt + odd_txt
all_dat = even_dat + odd_dat
all_assets = even_assets + odd_assets
# Maps a test asset to platform(s)
file_platforms = {
"txtfile_0.txt": "pc",
"txtfile_1.txt": "pc",
"txtfile_2.txt": "pc,mac",
"txtfile_3.txt": "pc,mac",
"txtfile_4.txt": "mac",
"txtfile_5.txt": "mac",
"datfile_0.dat": "pc",
"datfile_1.dat": "pc",
"datfile_2.dat": "pc,mac",
"datfile_3.dat": "pc,mac",
"datfile_4.dat": "mac",
"datfile_5.dat": "mac",
}
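# Assets tagged "pc,mac" are expected to appear in both platform asset lists; the
# per-platform expectations below are derived from this mapping via get_platform_assets().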
# Comparison rules files and their associated 'comparisonType' flags
comparison_files = [
("delta.rules", "0"),
("union.rules", "1"),
("intersection.rules", "2"),
("complement.rules", "3"),
("pattern.rules", "4"),
("combined.rules", "2,0"),
]
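# The numeric values correspond to the bundler's --comparisonType flags (0=delta, 1=union,
# 2=intersection, 3=complement, 4=pattern); "combined.rules" holds two values because it
# chains two comparison steps, here an intersection followed by a delta.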
# Get our test assets ready and processed
utils.prepare_test_assets(env["tests_dir"], "C16877178", env["project_test_assets_dir"])
asset_processor.batch_process(timeout=timeout, fastscan=False, platforms="pc,mac")
# *** Some helper functions *** #
def create_seed_file(asset_names: List[str], seed_file_name: str) -> None:
"""Adds the [asset_names] to the seed file [seed_file_name] with their specific platform"""
for asset_file_name in asset_names:
helper.call_seeds(
seedListFile=os.path.join(helper["test_dir"], seed_file_name),
addSeed=os.path.join(env["test_asset_dir_name"], asset_file_name),
platform=file_platforms[asset_file_name],
)
def create_asset_list_file(seed_file_name: str, asset_list_file_name: str) -> None:
"""Simple wrapper for calling the Bundler Batch for a [seed_file_name] and [asset_list_file_name]"""
helper.call_assetLists(
assetListFile=os.path.join(helper["test_dir"], asset_list_file_name),
seedListFile=os.path.join(helper["test_dir"], seed_file_name),
platform="pc,mac",
)
def get_platform_assets(asset_name_list: List[str]) -> Dict[str, List[str]]:
"""Separates each asset in [asset_name_list] into their platforms"""
win_assets = []
mac_assets = []
for asset_name in asset_name_list:
if "pc" in file_platforms[asset_name]:
win_assets.append(asset_name)
if "mac" in file_platforms[asset_name]:
mac_assets.append(asset_name)
return {"win": win_assets, "mac": mac_assets}
def validate_asset_list(asset_list_file: str, asset_list: List[str]) -> None:
"""Validates that the [asset_list_file] contains exactly the assets in [asset_list]"""
assets_to_find = list(asset_list) # Make copy of list. We will be removing elements as we go
for rel_path in helper.get_asset_relative_paths(os.path.join(helper["test_dir"], asset_list_file)):
asset = os.path.split(rel_path)[1] # Get just the asset's file name
try:
assets_to_find.remove(asset) # Attempt to remove
except ValueError:
# Item not found in list? Unexpected asset
assert False, (
f"Unexpected asset found. Asset List: {asset_list_file}; "
f"Unexpected Asset {asset}; Expected Assets: {asset_list}"
)
# If assets_to_find is empty, we found all expected assets
assert (
len(assets_to_find) == 0
), f"Expected asset(s) {assets_to_find} not found in asset list: {asset_list_file}"
def validate_request_file(request_file: str, asset_names: List[str]) -> None:
"""Validates both mac and pc platform results for an assetlist request file"""
# Get platform result file names
win_asset_list_file = helper.platform_file_name(request_file, platforms["pc"])
mac_asset_list_file = helper.platform_file_name(request_file, platforms["mac"])
# Get expected platforms for each asset in asset_names
platform_files = get_platform_assets(asset_names)
# Validate each platform
validate_asset_list(win_asset_list_file, platform_files["win"])
validate_asset_list(mac_asset_list_file, platform_files["mac"])
def compare_and_check(
rule: str, # The rule to use for comparison
first_asset_list: str or List[str], # The parameter(s) for '--firstAssetFile'
second_asset_list: str or List[str], # The parameter(s) for '--secondAssetFile'
expected_asset_list: str or List[str], # A list of expected asset to be in the output assetlist
output_file: str or List[str] = "output.assetlist", # The parameter for '--output'
use_file: Optional[bool] = False, # Bool for whether to compare using the .rules file
pattern_type: Optional[str] = None, # Parameter for '--filePatternType' (pattern comparison only)
pattern: Optional[str] = None, # Parameter for '--filePattern' (pattern comparison only)
) -> None:
"""
Based on parameters, creates an asset bundler batch command for executing a compare,
then validates the resulting file. Runs command for both platforms (Win and Mac)
"""
def asset_lists_to_string(asset_lists: str or List[str]) -> object:
"""Converts a list of asset list files into a string parameter to use for Bundler CLI"""
if asset_lists is None:
return None
if type(asset_lists) == str:
asset_lists = [asset_lists]
out = ""
for asset_list in asset_lists:
if asset_list.startswith("$"):
# If it's a command line variable, don't worry about file platform
out += asset_list + ","
else:
# Get absolute file path
out += os.path.join(helper["test_dir"], asset_list) + ","
return out[:-1] # Trim off extra comma
# End asset_lists_to_string()
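# e.g. asset_lists_to_string(["all_dat.assetlist", "$first"]) would yield
# "<test_dir>/all_dat.assetlist,$first" (illustrative values only)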
# Initialize common command arguments
# We do not specify a platform in the file names, the AssetBundlerBatch will handle that automatically
first_input_arg = asset_lists_to_string(first_asset_list) # --firstAssetList
second_input_arg = asset_lists_to_string(second_asset_list) # --secondAssetList
output_arg = asset_lists_to_string(output_file) # --output
def generate_compare_command(platform_arg: str) -> object:
"""Creates a string containing a full Compare command. This string can be executed as-is."""
cmd = [helper["bundler_batch"], "compare", f"--firstassetFile={first_input_arg}", f"--output={output_arg}"]
if platform_arg is not None:
cmd.append(f"--platform={platform_arg}")
if second_input_arg is not None:
cmd.append(f"--secondAssetFile={second_input_arg}")
if use_file:
file_name = os.path.join(helper["test_dir"], rule + ".rules")
cmd.append("--comparisonRulesFile=" + file_name)
else:
comp_type = [i for r, i in comparison_files if r.startswith(rule)][0] # Get comparisonType flag
cmd.append(f"--comparisonType={comp_type}")
if comp_type == "4":
# Extra arguments for pattern comparison
cmd.extend([f"--filePatternType={pattern_type}", f"--filePattern={pattern}"])
return cmd
# End generate_compare_command()
def verify_asset_list_contents(expected: str, output_asset_list: str) -> None:
# Compare relative paths from inside 'expected' and 'output_asset_list' (last output file from cmd)
expected_paths = []
actual_paths = []
for rel_path in helper.get_asset_relative_paths(expected):
expected_paths.append(rel_path)
for rel_path in helper.get_asset_relative_paths(output_asset_list):
actual_paths.append(rel_path)
# fmt:off
assert sorted(expected_paths) == sorted(actual_paths), \
"Asset list comparison did not yield expected results"
# fmt:on
# End verify_asset_list_contents()
def run_compare_command_and_verify(platform_arg: str, expect_pc_output: bool, expect_mac_output: bool) -> None:
# Expected asset list to equal result of comparison
expected_pc_asset_list = None
expected_mac_asset_list = None
# Last output file. Use this for comparison to 'expected'
output_pc_asset_list = None
output_mac_asset_list = None
# Add the platform to the file name to match what the Bundler will create
last_output_arg = output_arg.split(",")[-1]
if expect_pc_output:
platform = platforms["pc"]
expected_pc_asset_list = os.path.join(helper["test_dir"], helper.platform_file_name(expected_asset_list, platform))
output_pc_asset_list = helper.platform_file_name(last_output_arg, platform)
if expect_mac_output:
platform = platforms["mac"]
expected_mac_asset_list = os.path.join(helper["test_dir"], helper.platform_file_name(expected_asset_list, platform))
output_mac_asset_list = helper.platform_file_name(last_output_arg, platform)
# Build execution command
cmd = generate_compare_command(platform_arg)
# Execute command
subprocess.check_call(cmd)
# Verify and clean up
if expect_pc_output:
verify_asset_list_contents(expected_pc_asset_list, output_pc_asset_list)
fs.delete([output_pc_asset_list], True, True)
if expect_mac_output:
verify_asset_list_contents(expected_mac_asset_list, output_mac_asset_list)
fs.delete([output_mac_asset_list], True, True)
# End run_compare_command_and_verify()
# Generate command, run and validate for each platform
run_compare_command_and_verify("pc", True, False)
run_compare_command_and_verify("mac", False, True)
run_compare_command_and_verify("pc,mac", True, True)
#run_compare_command_and_verify(None, True, True)
# End compare_and_check()
# *** Start test execution code *** #
# Create comparison (.rules) files
for args in comparison_files:
rule_file = os.path.join(helper["test_dir"], args[0])
logger.info(f"Creating rule file: {rule_file}")
cmd = [
helper["bundler_batch"],
"comparisonRules",
f"--comparisonRulesFile={rule_file}",
f"--comparisonType={args[1]}",
r"--addComparison",
]
if args[1] == "4":
# If pattern comparison, append a few extra arguments
cmd.extend(["--filePatternType=0", "--filePattern=*.dat"])
subprocess.check_call(cmd)
assert os.path.exists(rule_file), f"Rule file {args[0]} was not created at location: {rule_file}"
# Create seed files for different sets of test assets (something to compare against)
create_seed_file(even_txt, "even_txt.seed")
create_seed_file(even_dat, "even_dat.seed")
create_seed_file(even_assets, "even_assets.seed")
create_seed_file(odd_txt, "odd_txt.seed")
create_seed_file(odd_dat, "odd_dat.seed")
create_seed_file(odd_assets, "odd_assets.seed")
create_seed_file(all_txt, "all_txt.seed")
create_seed_file(all_dat, "all_dat.seed")
create_seed_file(all_assets, "all_assets.seed")
# Create assetlist files for seed files
create_asset_list_file("even_txt.seed", "even_txt.assetlist")
create_asset_list_file("even_dat.seed", "even_dat.assetlist")
create_asset_list_file("even_assets.seed", "even_assets.assetlist")
create_asset_list_file("odd_txt.seed", "odd_txt.assetlist")
create_asset_list_file("odd_dat.seed", "odd_dat.assetlist")
create_asset_list_file("odd_assets.seed", "odd_assets.assetlist")
create_asset_list_file("all_txt.seed", "all_txt.assetlist")
create_asset_list_file("all_dat.seed", "all_dat.assetlist")
create_asset_list_file("all_assets.seed", "all_assets.assetlist")
# Make sure the assetlists were created properly (including platform validation)
validate_request_file("even_txt.assetlist", even_txt)
validate_request_file("even_dat.assetlist", even_dat)
validate_request_file("even_assets.assetlist", even_assets)
validate_request_file("odd_txt.assetlist", odd_txt)
validate_request_file("odd_dat.assetlist", odd_dat)
validate_request_file("odd_assets.assetlist", odd_assets)
validate_request_file("all_txt.assetlist", all_txt)
validate_request_file("all_dat.assetlist", all_dat)
validate_request_file("all_assets.assetlist", all_assets)
# Compare using comparison rules files and just command line arguments
for use_file in [True, False]:
compare_and_check(
"delta", "even_assets.assetlist", "all_assets.assetlist", "odd_assets.assetlist", use_file=use_file
)
compare_and_check(
"union", "even_assets.assetlist", "odd_assets.assetlist", "all_assets.assetlist", use_file=use_file
)
compare_and_check(
"intersection",
"even_assets.assetlist",
"all_assets.assetlist",
"even_assets.assetlist",
use_file=use_file,
)
compare_and_check(
"complement", "all_txt.assetlist", "all_assets.assetlist", "all_dat.assetlist", use_file=use_file
)
compare_and_check(
"pattern",
"all_assets.assetlist",
None,
"all_dat.assetlist",
use_file=use_file,
pattern_type="0",
pattern="*.dat",
)
# Special parameters for 'combined' comparisons
compare_and_check(
rule="combined",
first_asset_list=["all_dat.assetlist", "$first"],
second_asset_list=["even_assets.assetlist", "even_assets.assetlist"],
expected_asset_list="even_txt.assetlist",
output_file=["$first", "output.assetlist"],
use_file=use_file,
)
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877174")
@pytest.mark.test_case_id("C16877175")
def test_WindowsAndMac_AssetListCreation_OutputMatchesResult(self, workspace, bundler_batch_helper):
"""
Tests that assetlists are created equivalent to the output while being created, and
makes sure overwriting an existing file without the --allowOverwrites fails
Test Steps:
1. Check that Asset List creation requires PC platform flag
2. Create a PC Asset List using asset info file and default seed lists using --print
3. Validate all assets output are present in the asset list
4. Create a seed file
5. Attempt to overwrite Asset List without using --allowOverwrites
6. Validate that command returned an error and file contents did not change
7. Overwrite the Asset List using --allowOverwrites
8. Verify the file has changed
"""
helper = bundler_batch_helper
# fmt:off
assert "pc" in helper["platforms"], \
"This test requires the PC platform to be enabled. " \
"Please rerun with commandline option: '--bundle_platforms=pc'"
# fmt:on
# Assetlist result file (pc platform)
al_file_path = os.path.join(
helper["test_dir"],
helper.platform_file_name(helper["asset_info_file_name"], platforms["pc"])
)
seed_files_pattern = re.compile("Loading Seed List file ( ([^)]*) )")
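# This pattern captures the seed list paths the bundler prints while loading the default
# seed lists, so the printed output below can be cross-checked against each seed file's contents.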
# Create an asset file
output = subprocess.check_output(
[
helper["bundler_batch"],
"assetLists",
f"--assetListFile={helper['asset_info_file_request']}",
"--addDefaultSeedListFiles",
"--platform=pc",
"--print",
f"--project-path={workspace.project}"
],
universal_newlines=True,
)
seed_files = seed_files_pattern.findall(output)
# Validate all assets output are present in the resulting asset file
for seed_file in seed_files:
for rel_path in helper.get_seed_relative_paths_for_platform(seed_file, helper.get_platform_flag("pc")):
assert rel_path in output, f"{rel_path} was not found in output from Asset Bundle Batch"
# Create a seed file
helper.call_seeds(
seedListFile=helper["seed_list_file"],
addSeed=r"levels\testdependencieslevel\level.pak",
platform="pc",
)
# Get file contents before trying a failed overwrite attempt
with open(al_file_path, "r") as asset_file:
file_contents = asset_file.read()
result, _ = helper.call_assetLists(
assetListFile=helper["asset_info_file_request"],
seedListFile=helper["seed_list_file"],
platform="pc",
print="",
)
assert result is False, "Overwriting without override did not throw an error"
# Validate file did not change when overwrite failed
with open(al_file_path, "r") as asset_file:
assert file_contents == asset_file.read(), f"The failed overwrite changed the file {al_file_path}"
# Overwriting with --allowOverwrites should succeed and modify the file
result, _ = helper.call_assetLists(
assetListFile=helper["asset_info_file_request"],
seedListFile=helper["seed_list_file"],
platform="pc",
allowOverwrites="",
)
assert result, "Overwriting with override threw an error"
# Make sure the file is now changed
with open(al_file_path, "r") as asset_file:
assert file_contents != asset_file.read(), f"The overwrite did not change the file {al_file_path}"
@pytest.mark.BAT
@pytest.mark.assetpipeline
@pytest.mark.test_case_id("C16877175")
@pytest.mark.test_case_id("C16877177")
# fmt:off
def test_WindowsAndMac_AP_BundleProcessing_BundleProcessedAtRuntime(self, workspace, bundler_batch_helper,
asset_processor, request):
# fmt:on
"""
Test to make sure the AP GUI will process a newly created bundle file
Test Steps:
1. Make asset list file (used for bundle creation)
2. Start Asset Processor GUI
3. Make bundle in <project_folder>/Bundles
4. Validate file was created in Bundles folder
5. Make sure bundle now exists in cache
"""
# Set up helpers and variables
helper = bundler_batch_helper
# Make sure file gets deleted on teardown
request.addfinalizer(lambda: fs.delete([bundle_result_path], True, False))
bundles_folder = os.path.join(workspace.paths.engine_root(), workspace.project, "Bundles")
level_pak = r"levels\testdependencieslevel\level.pak"
bundle_request_path = os.path.join(bundles_folder, "bundle.pak")
bundle_result_path = os.path.join(bundles_folder,
helper.platform_file_name("bundle.pak", workspace.asset_processor_platform))
bundle_cache_path = os.path.join(workspace.paths.platform_cache(),
"Bundles",
helper.platform_file_name("bundle.pak", workspace.asset_processor_platform))
# Create target 'Bundles' folder if it does not exist
if not os.path.exists(bundles_folder):
os.mkdir(bundles_folder)
# Delete target bundle file if it already exists
if os.path.exists(bundle_result_path):
fs.delete([bundle_result_path], True, False)
# Make asset list file (used for bundle creation)
helper.call_assetLists(
addSeed=level_pak,
assetListFile=helper["asset_info_file_request"],
)
# Run Asset Processor GUI
result, _ = asset_processor.gui_process()
assert result, "AP GUI failed"
time.sleep(5)
# Make bundle in <project_folder>/Bundles
helper.call_bundles(
assetListFile=helper["asset_info_file_result"],
outputBundlePath=bundle_request_path,
maxSize="2048",
)
# Ensure file was created
assert os.path.exists(bundle_result_path), f"Bundle was not created at location: {bundle_result_path}"
timeout = 10
waiter.wait_for(lambda: os.path.exists(bundle_cache_path), timeout=timeout)
# Make sure bundle now exists in cache
assert os.path.exists(bundle_cache_path), f"{bundle_cache_path} not found"
@pytest.mark.BAT
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_FilesMarkedSkip_FilesAreSkipped(self, workspace, bundler_batch_helper):
"""
Test Steps:
1. Create an asset list with a file marked as skip
2. Verify file was created
3. Verify that only the expected assets are present in the created asset list
"""
expected_assets = [
"ui/canvases/lyshineexamples/animation/multiplesequences.uicanvas",
"ui/textures/prefab/button_normal.sprite"
]
bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],
addSeed="ui/canvases/lyshineexamples/animation/multiplesequences.uicanvas",
skip="ui/textures/prefab/button_disabled.sprite,ui/scripts/lyshineexamples/animation/multiplesequences.luac,"
"ui/textures/prefab/tooltip_sliced.sprite,ui/scripts/lyshineexamples/unloadthiscanvasbutton.luac,fonts/vera.fontfamily,fonts/vera-italic.font,"
"fonts/vera.font,fonts/vera-bold.font,fonts/vera-bold-italic.font,fonts/vera-italic.ttf,fonts/vera.ttf,fonts/vera-bold.ttf,fonts/vera-bold-italic.ttf"
)
assert os.path.isfile(bundler_batch_helper["asset_info_file_result"])
assets_in_list = []
for rel_path in bundler_batch_helper.get_asset_relative_paths(bundler_batch_helper["asset_info_file_result"]):
assets_in_list.append(rel_path)
assert sorted(assets_in_list) == sorted(expected_assets)
@pytest.mark.BAT
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_AssetListSkipOneOfTwoParents_SharedDependencyIsIncluded(self, workspace,
bundler_batch_helper):
"""
Test Steps:
1. Create Asset List with a parent asset that is skipped
2. Verify that Asset List was created
3. Verify that only the expected assets are present in the asset list
"""
expected_assets = [
"testassets/bundlerskiptest_grandparent.dynamicslice",
"testassets/bundlerskiptest_parenta.dynamicslice",
"testassets/bundlerskiptest_commonchild.dynamicslice"
]
# Test Asset Structure:
# Grandparent
# / \
# ParentA ParentB
# \ /
# CommonChild
# Even if we exclude "ParentB", we should still have "CommonChild" since it is a product dependency of "ParentA"
bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],
addSeed="testassets/bundlerskiptest_grandparent.dynamicslice",
skip="testassets/bundlerskiptest_parentb.dynamicslice"
)
assert os.path.isfile(bundler_batch_helper["asset_info_file_result"])
assets_in_list = []
for rel_path in bundler_batch_helper.get_asset_relative_paths(bundler_batch_helper["asset_info_file_result"]):
assets_in_list.append(rel_path)
assert sorted(assets_in_list) == sorted(expected_assets)
@pytest.mark.BAT
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_AssetLists_SkipRoot_ExcludesAll(self, workspace, bundler_batch_helper):
"""
Negative scenario test that skips the same file being used as the parent seed.
Test Steps:
1. Create an asset list that skips the root asset
2. Verify that asset list was not generated
"""
result, _ = bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],
addSeed="libs/particles/milestone2particles.xml",
skip="libs/particles/milestone2particles.xml"
)
if not result:
assert not os.path.isfile(bundler_batch_helper["asset_info_file_result"])
return
# If an error was not thrown, this test should fail
assert False
@pytest.mark.BAT
@pytest.mark.assetpipeline
# fmt:off
def test_WindowsAndMac_AssetLists_SkipUniversalWildcard_ExcludesAll(self, workspace, bundler_batch_helper):
"""
Negative scenario test that uses the all wildcard when generating an asset list.
Test Steps:
1. Create an Asset List while using the universal all wildcard "*"
2. Verify that asset list was not generated
"""
result, _ = bundler_batch_helper.call_assetLists(
assetListFile=bundler_batch_helper['asset_info_file_request'],
addSeed="libs/particles/milestone2particles.xml",
skip="*"
)
if not result:
assert not os.path.isfile(bundler_batch_helper["asset_info_file_result"])
return
# If an error was not thrown, this test should fail
assert False
the-stack_0_17890
"""
.. _tut_stats_cluster_source_rANOVA:
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will test whether the differences in evoked responses between
stimulation modality (visual vs. auditory) depend on the stimulus
location (left vs. right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts across conditions to eliminate bias in the ANOVA
epochs.equalize_event_counts(event_id, copy=False)
###############################################################################
# Transform to source space
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50)
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep
###############################################################################
# Transform to common cortical space
# Normally you would read in estimates across several subjects and morph
# them to the same cortical space (e.g. fsaverage). For example purposes,
# we will simulate this by just having each "subject" have the same
# response (just noisy in source space) here.
# we'll only consider the left hemisphere in this example.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
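# The last axis indexes the four conditions (l_aud, r_aud, l_vis, r_vis) in the order used above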
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.array([], int)] # right hemi is empty
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
# Now we need to prepare the group matrix for the ANOVA statistic.
# To make the clustering function work correctly with the
# ANOVA function X needs to be a list of multi-dimensional arrays
# (one per condition) of shape: samples (subjects) x time x space
X = np.transpose(X, [2, 1, 0, 3]) # First we permute dimensions
# finally we split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
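# X is now a list of 4 arrays (one per condition), each of shape
# (n_subjects, n_times, n_vertices_fsave), as required by the clustering function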
###############################################################################
# Prepare function for arbitrary contrast
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language)
effects = 'A:B' # Without this restriction the main effects would also be returned.
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
# A stat_fun must deal with a variable number of input arguments.
def stat_fun(*args):
# Inside the clustering function each condition will be passed as
# flattened array, necessitated by the clustering procedure.
# The ANOVA however expects an input array of dimensions:
# subjects X conditions X observations (optional).
# The following expression catches the list input
# and swaps the first and the second dimension, and finally calls ANOVA.
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
# get f-values only.
# Note. for further details on this ANOVA function consider the
# corresponding time frequency example.
###############################################################################
# Compute clustering statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
source_space = grade_to_tris(5)
# as we only have one hemisphere we only need half the connectivity
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
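# f_threshold_mway_rm returns the F-value corresponding to pthresh for this design,
# which is used as the cluster-forming threshold passed to the cluster test below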
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, colormap='mne',
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
the-stack_0_17891
import os
import re
import uuid
from distutils.version import LooseVersion
from typing import Tuple
import click
from git import InvalidGitRepositoryError
from demisto_sdk.commands.common.constants import PLAYBOOK, FileType
from demisto_sdk.commands.common.git_util import GitUtil
from demisto_sdk.commands.common.tools import (find_type, get_yaml,
is_string_uuid, write_yml)
from demisto_sdk.commands.format.format_constants import (
ERROR_RETURN_CODE, NEW_FILE_DEFAULT_5_5_0_FROMVERSION, SCHEMAS_PATH,
SKIP_RETURN_CODE, SUCCESS_RETURN_CODE)
from demisto_sdk.commands.format.update_generic_yml import BaseUpdateYML
class BasePlaybookYMLFormat(BaseUpdateYML):
def __init__(self,
input: str = '',
output: str = '',
path: str = '',
from_version: str = '',
no_validate: bool = False,
verbose: bool = False,
assume_yes: bool = False,
deprecate: bool = False):
super().__init__(input=input, output=output, path=path, from_version=from_version, no_validate=no_validate,
verbose=verbose, assume_yes=assume_yes, deprecate=deprecate)
def add_description(self):
"""Add empty description to playbook and tasks."""
if self.verbose:
click.echo('Adding descriptions for the playbook and to relevant tasks')
if 'description' not in set(self.data.keys()):
click.secho('No description is specified for this playbook, would you like to add a description? [Y/n]',
fg='bright_red')
user_answer = ''
while not user_answer:
user_answer = input()
if user_answer in ['n', 'N', 'no', 'No']:
user_description = ''
self.data['description'] = user_description
elif user_answer in ['y', 'Y', 'yes', 'Yes']:
user_description = input("Please enter the description\n")
self.data['description'] = user_description
else:
click.secho('Invalid input, would you like to add a description? [Y/n]', fg='bright_red')
user_answer = ''
for task_id, task in self.data.get('tasks', {}).items():
if not task['task'].get('description') and task['type'] in ['title', 'start', 'playbook']:
task['task'].update({'description': ''})
def update_fromversion_by_user(self):
"""If no fromversion is specified, asks the user for it's value and updates the playbook."""
if not self.data.get('fromversion', ''):
if self.assume_yes:
if self.verbose:
if self.from_version:
click.echo(f"Adding `fromversion: {self.from_version}`")
else:
click.echo(f"Adding `fromversion: {NEW_FILE_DEFAULT_5_5_0_FROMVERSION}`")
self.data[
'fromversion'] = self.from_version if self.from_version else NEW_FILE_DEFAULT_5_5_0_FROMVERSION
return
click.secho('No fromversion is specified for this playbook, would you like me to update for you? [Y/n]',
fg='red')
user_answer = input()
if user_answer in ['n', 'N', 'no', 'No']:
click.secho('Moving forward without updating fromversion tag', fg='yellow')
return
if self.from_version:
if self.verbose:
click.echo(f"Adding `fromversion: {self.from_version}`")
self.data['fromversion'] = self.from_version
return
is_input_version_valid = False
while not is_input_version_valid:
click.secho('Please specify the desired version X.X.X', fg='yellow')
user_desired_version = input()
if re.match(r'\d+\.\d+\.\d+', user_desired_version):
self.data['fromversion'] = user_desired_version
is_input_version_valid = True
else:
click.secho('Version format is not valid', fg='red')
elif not self.old_file and LooseVersion(self.data.get('fromversion', '0.0.0')) < \
LooseVersion(NEW_FILE_DEFAULT_5_5_0_FROMVERSION):
if self.assume_yes:
self.data['fromversion'] = NEW_FILE_DEFAULT_5_5_0_FROMVERSION
else:
set_from_version = str(
input(f"\nYour current fromversion is: '{self.data.get('fromversion')}'. Do you want "
f"to set it to '5.5.0'? Y/N ")).lower()
if set_from_version in ['y', 'yes']:
self.data['fromversion'] = NEW_FILE_DEFAULT_5_5_0_FROMVERSION
def update_task_uuid(self):
"""If taskid field and the id under the task field are not from uuid type, generate uuid instead"""
for task_key, task in self.data.get('tasks', {}).items():
taskid = str(task.get('taskid', ''))
task_id_under_task = str(task.get('task', {}).get('id', ''))
if not is_string_uuid(taskid) or not is_string_uuid(task_id_under_task):
if self.verbose:
click.secho(f"Taskid field and the id under task field must be from uuid format. Generating uuid "
f"for those fields under task key: {task_key}", fg='white')
generated_uuid = str(uuid.uuid4())
task['taskid'] = generated_uuid
task['task']['id'] = generated_uuid
def run_format(self) -> int:
self.update_fromversion_by_user()
super().update_yml(file_type=PLAYBOOK)
self.add_description()
self.update_task_uuid()
self.save_yml_to_destination_file()
return SUCCESS_RETURN_CODE
def format_file(self) -> Tuple[int, int]:
"""Manager function for the playbook YML updater."""
format_res = self.run_format()
if format_res:
return format_res, SKIP_RETURN_CODE
else:
return format_res, self.initiate_file_validator()
class PlaybookYMLFormat(BasePlaybookYMLFormat):
"""PlaybookYMLFormat class is designed to update playbooks YML file according to Demisto's convention.
Attributes:
input (str): the path to the file we are updating at the moment.
output (str): the desired file name to save the updated version of the YML to.
"""
def delete_sourceplaybookid(self):
"""Delete the not needed sourceplaybookid fields"""
if self.verbose:
click.echo('Removing sourceplaybookid field from playbook')
if 'sourceplaybookid' in self.data:
self.data.pop('sourceplaybookid', None)
def remove_copy_and_dev_suffixes_from_subplaybook(self):
for task_id, task in self.data.get('tasks', {}).items():
if task['task'].get('playbookName'):
task['task']['playbookName'] = task['task'].get('playbookName').replace('_dev', ''). \
replace('_copy', '')
task['task']['name'] = task['task'].get('name').replace('_dev', ''). \
replace('_copy', '')
def update_playbook_task_name(self):
"""Updates the name of the task to be the same as playbookName it is running."""
if self.verbose:
            click.echo('Updating the names of tasks that call other playbooks to match the called playbook name')
for task_id, task in self.data.get('tasks', {}).items():
if task.get('type', '') == 'playbook':
task_name = task.get('task').get('playbookName', task.get('task').get('playbookId', ''))
if task_name:
task['task']['name'] = task_name
def check_for_subplaybook_usages(self, file_path: str, current_playbook_id: str, new_playbook_id: str) -> None:
"""Check if the current_playbook_id appears in the file's playbook type tasks and change it if needed.
Arguments:
file_path (str): The file path to check.
current_playbook_id (str): The current playbook ID.
new_playbook_id (str): The new playbook ID.
"""
updated_tasks = []
        # if the changed file is a playbook, get its data
if find_type(file_path) in [FileType.PLAYBOOK, FileType.TEST_PLAYBOOK]:
playbook_data = get_yaml(file_path)
# go through all the tasks
for task_id, task_data in playbook_data.get('tasks').items():
# if a task is of playbook type
if task_data.get('type') == 'playbook':
id_key = 'playbookId' if 'playbookId' in task_data.get('task') else 'playbookName'
# make sure the playbookId or playbookName use the new id and not the old
if task_data.get('task', {}).get(id_key) == current_playbook_id:
playbook_data['tasks'][task_id]['task'][id_key] = new_playbook_id
updated_tasks.append(task_id)
# if any tasks were changed re-write the playbook
if updated_tasks:
if self.verbose:
click.echo(f'Found usage of playbook in {file_path} tasks: '
f'{" ".join(updated_tasks)} - Updating playbookId')
write_yml(file_path, playbook_data)
def update_playbook_usages(self) -> None:
"""Check if the current playbook is used as a sub-playbook in other changed playbooks.
        Change the playbook's id in those tasks if needed.
"""
current_playbook_id = str(self.data.get('id'))
new_playbook_id = str(self.data.get('name'))
# if the id and name are the same - there is no need for this format.
if current_playbook_id == new_playbook_id:
return
        # gather all the changed files - if the formatted playbook was
        # modified, any playbooks that reference it may have been changed
        # alongside it - use git to gather all other changed playbooks
try:
git_util = GitUtil()
modified_files = git_util.modified_files(include_untracked=True)
added_files = git_util.added_files(include_untracked=True)
renamed_files = git_util.renamed_files(include_untracked=True, get_only_current_file_names=True)
all_changed_files = modified_files.union(added_files).union(renamed_files) # type: ignore[arg-type]
except (InvalidGitRepositoryError, TypeError) as e:
click.secho('Unable to connect to git - skipping sub-playbook checks', fg='yellow')
if self.verbose:
click.secho(f'The error: {e}')
return
for file_path in all_changed_files:
self.check_for_subplaybook_usages(str(file_path), current_playbook_id, new_playbook_id)
def remove_empty_fields_from_scripts(self):
"""Removes unnecessary empty fields from SetIncident, SetIndicator, CreateNewIncident, CreateNewIndicator
scripts """
scripts = ["setIncident", "setIndicator", "createNewIncident", "createNewIndicator"]
for task_id, task in self.data.get('tasks', {}).items():
current_task_script = task.get('task', {}).get('script', '')
if any(script in current_task_script for script in scripts):
script_args = task.get('scriptarguments', {})
for key in list(script_args):
if not script_args[key]: # if value is empty
script_args.pop(key)
def run_format(self) -> int:
try:
click.secho(f'\n================= Updating file {self.source_file} =================', fg='bright_blue')
self.update_playbook_usages()
self.update_tests()
self.remove_copy_and_dev_suffixes_from_subplaybook()
self.update_conf_json('playbook')
self.delete_sourceplaybookid()
self.update_playbook_task_name()
self.remove_empty_fields_from_scripts()
super().run_format()
return SUCCESS_RETURN_CODE
except Exception as err:
if self.verbose:
click.secho(f'\nFailed to update file {self.source_file}. Error: {err}', fg='red')
return ERROR_RETURN_CODE
class TestPlaybookYMLFormat(BasePlaybookYMLFormat):
"""TestPlaybookYMLFormat class is designed to update playbooks YML file according to Demisto's convention.
Attributes:
input (str): the path to the file we are updating at the moment.
output (str): the desired file name to save the updated version of the YML to.
"""
def __init__(self, *args, **kwargs):
kwargs['path'] = os.path.normpath(
os.path.join(__file__, "..", "..", "common", SCHEMAS_PATH, 'playbook.yml'))
super().__init__(*args, **kwargs)
def run_format(self) -> int:
try:
click.secho(f'\n================= Updating file {self.source_file} =================', fg='bright_blue')
return super().run_format()
except Exception as err:
if self.verbose:
click.secho(f'\nFailed to update file {self.source_file}. Error: {err}', fg='red')
return ERROR_RETURN_CODE
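# A minimal usage sketch (not part of demisto-sdk): the input path below is a
# placeholder, and in practice these formatters are normally driven through the
# `demisto-sdk format` CLI rather than instantiated directly.
if __name__ == '__main__':
    formatter = PlaybookYMLFormat(
        input='Packs/Example/Playbooks/playbook-Example.yml',  # hypothetical path
        assume_yes=True,
        no_validate=True,
        verbose=True)
    format_code, validation_code = formatter.format_file()
    print(f'format: {format_code}, validation: {validation_code}')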
|
the-stack_0_17893 | import logging
import os
from datetime import datetime
from elasticsearch import Elasticsearch
log = logging.getLogger('file')
es_url = os.environ.get('ES_URL', 'http://172.30.0.55:9200')
es_error_index_test = "anuvaad-etl-errors-test-v1"
es_core_error_index = "anuvaad-etl-errors-core-v1"
es_wf_error_index = "anuvaad-etl-errors-wf-v1"
es_error_type = "errors"
def instantiate_es_client():
es_client = Elasticsearch([es_url])
return es_client
def index_to_es(index_obj):
try:
es = instantiate_es_client()
id = index_obj["errorID"]
if index_obj["errorType"] == "core-error":
in_name = es_core_error_index
else:
in_name = es_wf_error_index
index_obj = add_timestamp_field(index_obj)
es.index(index=in_name, doc_type=es_error_type, id=id, body=index_obj)
except Exception as e:
log.exception("Indexing FAILED for errorID: " + index_obj["errorID"])
def add_timestamp_field(error):
date_format = "%Y-%m-%d'T'%H:%M:%S.%f'Z'"
epoch = error["timeStamp"]
    epoch_short = int(str(epoch)[:10])
final_date = datetime.fromtimestamp(epoch_short).strftime(date_format)
error["@timestamp"] = final_date
return error |
the-stack_0_17894 | class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
assert isinstance(candidates, list) and isinstance(target, int)
if not candidates:
return
def findSum(l,r,curt_sum, rst):
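            # Backtracking: try every candidate index i in [l, r], keeping i available
            # at the next level so the same element may be reused in a combination.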
if l>r+1 or curt_sum > target:
return
if curt_sum == target:
rsts.append(rst)
for i in range(l,r+1,1):
findSum(i, r, curt_sum+candidates[i], rst+[candidates[i]])
return
rsts = []
findSum(0,len(candidates)-1,0,[])
return rsts |
the-stack_0_17895 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
from sklearn.preprocessing import PolynomialFeatures
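# degree=10 lets the curve pass almost exactly through the handful of position
# levels in the dataset; a lower degree (e.g. 4) would give a smoother fit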
poly_reg = PolynomialFeatures(degree = 10)
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
#Linear Graph
plt.scatter(X, y, color = '#61f248')
plt.plot(X, lin_reg.predict(X), color = '#9fac73')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
#Polynomial Graph
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = '#2bdf9e')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = '#18a9df')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
lin_reg.predict([[6.5]])
lin_reg_2.predict(poly_reg.fit_transform([[6.5]])) |
the-stack_0_17896 | import asyncio
import json
import aiohttp
class RateLimited(Exception):
pass
class Client:
def __init__(self):
self.base = f"https://lichess.org/api"
self.session = aiohttp.ClientSession()
self.timeout = 10
self.errors = {
            429: RateLimited
}
async def _request(self, method, endpoint, data):
url = self.base + endpoint
async with self.session.request(method, url, json=data, timeout = self.timeout) as r:
            if r.status in self.errors:
raise self.errors.get(r.status)
return await r.json()
async def post(self, endpoint, data):
resp = await self._request("POST", endpoint, data)
return(resp)
async def get(self, endpoint):
resp = await self._request("GET", endpoint, None)
return(resp)
async def put(self, endpoint, data):
resp = await self._request("PUT", endpoint, data)
return(resp)
async def delete(self, endpoint):
resp = await self._request("DELETE", endpoint, None)
return(resp)
class LichessHTTP:
def __init__(self):
self.client = Client()
async def create_open_challange(self, varient):
endpoint = "/challenge/open"
body = {"varient" : varient}
resp = await self.client.post(endpoint, body)
return resp
|
the-stack_0_17898 | """
This is the main trainer script for box-shape AE/VAE experiments.
Use scripts/train_ae_box_chair.sh or scripts/train_vae_box_chair.sh to run.
"""
import os
import time
import sys
import shutil
import random
from time import strftime
from argparse import ArgumentParser
import numpy as np
import torch
import torch.utils.data
from config import add_train_vae_args
from data import PartNetDataset, Tree
import utils
# Use 1-4 CPU threads to train.
# Don't use too many CPU threads, which will slow down the training.
torch.set_num_threads(2)
def train(conf):
# load network model
models = utils.get_model_module(conf.model_version)
# check if training run already exists. If so, delete it.
if os.path.exists(os.path.join(conf.log_path, conf.exp_name)) or \
os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
response = input('A training run named "%s" already exists, overwrite? (y/n) ' % (conf.exp_name))
if response != 'y':
sys.exit()
if os.path.exists(os.path.join(conf.log_path, conf.exp_name)):
shutil.rmtree(os.path.join(conf.log_path, conf.exp_name))
if os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
shutil.rmtree(os.path.join(conf.model_path, conf.exp_name))
# create directories for this run
os.makedirs(os.path.join(conf.model_path, conf.exp_name))
os.makedirs(os.path.join(conf.log_path, conf.exp_name))
# file log
flog = open(os.path.join(conf.log_path, conf.exp_name, 'train.log'), 'w')
# set training device
device = torch.device(conf.device)
print(f'Using device: {conf.device}')
flog.write(f'Using device: {conf.device}\n')
# log the object category information
print(f'Object Category: {conf.category}')
flog.write(f'Object Category: {conf.category}\n')
# control randomness
if conf.seed < 0:
conf.seed = random.randint(1, 10000)
print("Random Seed: %d" % (conf.seed))
flog.write(f'Random Seed: {conf.seed}\n')
random.seed(conf.seed)
np.random.seed(conf.seed)
torch.manual_seed(conf.seed)
# save config
torch.save(conf, os.path.join(conf.model_path, conf.exp_name, 'conf.pth'))
# create models
encoder = models.RecursiveEncoder(conf, variational=True, probabilistic=not conf.non_variational)
decoder = models.RecursiveDecoder(conf)
models = [encoder, decoder]
model_names = ['encoder', 'decoder']
# create optimizers
encoder_opt = torch.optim.Adam(encoder.parameters(), lr=conf.lr)
decoder_opt = torch.optim.Adam(decoder.parameters(), lr=conf.lr)
optimizers = [encoder_opt, decoder_opt]
optimizer_names = ['encoder', 'decoder']
# learning rate scheduler
encoder_scheduler = torch.optim.lr_scheduler.StepLR(encoder_opt, \
step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)
decoder_scheduler = torch.optim.lr_scheduler.StepLR(decoder_opt, \
step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)
# create training and validation datasets and data loaders
data_features = ['object']
train_dataset = PartNetDataset(conf.data_path, conf.train_dataset, data_features, \
load_geo=conf.load_geo)
valdt_dataset = PartNetDataset(conf.data_path, conf.val_dataset, data_features, \
load_geo=conf.load_geo)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=conf.batch_size, \
shuffle=True, collate_fn=utils.collate_feats)
valdt_dataloader = torch.utils.data.DataLoader(valdt_dataset, batch_size=conf.batch_size, \
shuffle=True, collate_fn=utils.collate_feats)
# create logs
if not conf.no_console_log:
header = ' Time Epoch Dataset Iteration Progress(%) LR BoxLoss StructLoss EdgeExists KLDivLoss SymLoss AdjLoss AnchorLoss TotalLoss'
if not conf.no_tb_log:
# https://github.com/lanpa/tensorboard-pytorch
from tensorboardX import SummaryWriter
train_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'train'))
valdt_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'val'))
# send parameters to device
for m in models:
m.to(device)
for o in optimizers:
utils.optimizer_to_device(o, device)
# start training
print("Starting training ...... ")
flog.write('Starting training ......\n')
start_time = time.time()
last_checkpoint_step = None
last_train_console_log_step, last_valdt_console_log_step = None, None
train_num_batch, valdt_num_batch = len(train_dataloader), len(valdt_dataloader)
# train for every epoch
for epoch in range(conf.epochs):
if not conf.no_console_log:
print(f'training run {conf.exp_name}')
flog.write(f'training run {conf.exp_name}\n')
print(header)
flog.write(header+'\n')
train_batches = enumerate(train_dataloader, 0)
valdt_batches = enumerate(valdt_dataloader, 0)
train_fraction_done, valdt_fraction_done = 0.0, 0.0
valdt_batch_ind = -1
# train for every batch
for train_batch_ind, batch in train_batches:
train_fraction_done = (train_batch_ind + 1) / train_num_batch
train_step = epoch * train_num_batch + train_batch_ind
log_console = not conf.no_console_log and (last_train_console_log_step is None or \
train_step - last_train_console_log_step >= conf.console_log_interval)
if log_console:
last_train_console_log_step = train_step
# set models to training mode
for m in models:
m.train()
# forward pass (including logging)
total_loss = forward(
batch=batch, data_features=data_features, encoder=encoder, decoder=decoder, device=device, conf=conf,
is_valdt=False, step=train_step, epoch=epoch, batch_ind=train_batch_ind, num_batch=train_num_batch, start_time=start_time,
log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=train_writer,
lr=encoder_opt.param_groups[0]['lr'], flog=flog)
# optimize one step
encoder_scheduler.step()
decoder_scheduler.step()
encoder_opt.zero_grad()
decoder_opt.zero_grad()
total_loss.backward()
encoder_opt.step()
decoder_opt.step()
# save checkpoint
with torch.no_grad():
if last_checkpoint_step is None or \
train_step - last_checkpoint_step >= conf.checkpoint_interval:
print("Saving checkpoint ...... ", end='', flush=True)
flog.write("Saving checkpoint ...... ")
utils.save_checkpoint(
models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
epoch=epoch, prepend_epoch=True, optimizers=optimizers, optimizer_names=model_names)
print("DONE")
flog.write("DONE\n")
last_checkpoint_step = train_step
# validate one batch
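            # run just enough validation batches to keep validation progress in
            # step with training progress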
while valdt_fraction_done <= train_fraction_done and valdt_batch_ind+1 < valdt_num_batch:
valdt_batch_ind, batch = next(valdt_batches)
valdt_fraction_done = (valdt_batch_ind + 1) / valdt_num_batch
valdt_step = (epoch + valdt_fraction_done) * train_num_batch - 1
log_console = not conf.no_console_log and (last_valdt_console_log_step is None or \
valdt_step - last_valdt_console_log_step >= conf.console_log_interval)
if log_console:
last_valdt_console_log_step = valdt_step
# set models to evaluation mode
for m in models:
m.eval()
with torch.no_grad():
# forward pass (including logging)
__ = forward(
batch=batch, data_features=data_features, encoder=encoder, decoder=decoder, device=device, conf=conf,
is_valdt=True, step=valdt_step, epoch=epoch, batch_ind=valdt_batch_ind, num_batch=valdt_num_batch, start_time=start_time,
log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=valdt_writer,
lr=encoder_opt.param_groups[0]['lr'], flog=flog)
# save the final models
print("Saving final checkpoint ...... ", end='', flush=True)
flog.write("Saving final checkpoint ...... ")
utils.save_checkpoint(
models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
epoch=epoch, prepend_epoch=False, optimizers=optimizers, optimizer_names=optimizer_names)
print("DONE")
flog.write("DONE\n")
flog.close()
def forward(batch, data_features, encoder, decoder, device, conf,
is_valdt=False, step=None, epoch=None, batch_ind=0, num_batch=1, start_time=0,
log_console=False, log_tb=False, tb_writer=None, lr=None, flog=None):
objects = batch[data_features.index('object')]
losses = {
'box': torch.zeros(1, device=device),
'anchor': torch.zeros(1, device=device),
'leaf': torch.zeros(1, device=device),
'exists': torch.zeros(1, device=device),
'semantic': torch.zeros(1, device=device),
'edge_exists': torch.zeros(1, device=device),
'kldiv': torch.zeros(1, device=device),
'sym': torch.zeros(1, device=device),
'adj': torch.zeros(1, device=device)}
# process every data in the batch individually
for obj in objects:
obj.to(device)
# encode object to get root code
root_code = encoder.encode_structure(obj=obj)
# get kldiv loss
if not conf.non_variational:
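            # the variational encoder packs the sampled latent code and the
            # per-dimension KL term along dim 1, so split them apart here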
root_code, obj_kldiv_loss = torch.chunk(root_code, 2, 1)
obj_kldiv_loss = -obj_kldiv_loss.sum() # negative kldiv, sum over feature dimensions
losses['kldiv'] = losses['kldiv'] + obj_kldiv_loss
# decode root code to get reconstruction loss
obj_losses = decoder.structure_recon_loss(z=root_code, gt_tree=obj)
for loss_name, loss in obj_losses.items():
losses[loss_name] = losses[loss_name] + loss
for loss_name in losses.keys():
losses[loss_name] = losses[loss_name] / len(objects)
losses['box'] *= conf.loss_weight_box
losses['anchor'] *= conf.loss_weight_anchor
losses['leaf'] *= conf.loss_weight_leaf
losses['exists'] *= conf.loss_weight_exists
losses['semantic'] *= conf.loss_weight_semantic
losses['edge_exists'] *= conf.loss_weight_edge_exists
losses['kldiv'] *= conf.loss_weight_kldiv
losses['sym'] *= conf.loss_weight_sym
losses['adj'] *= conf.loss_weight_adj
total_loss = 0
for loss in losses.values():
total_loss += loss
with torch.no_grad():
# log to console
if log_console:
print(
f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
f'''{'validation' if is_valdt else 'training':^10s} '''
f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
f'''{lr:>5.2E} '''
f'''{losses['box'].item():>11.2f} '''
f'''{(losses['leaf']+losses['exists']+losses['semantic']).item():>11.2f} '''
f'''{losses['edge_exists'].item():>11.2f} '''
f'''{losses['kldiv'].item():>10.2f} '''
f'''{losses['sym'].item():>10.2f} '''
f'''{losses['adj'].item():>10.2f} '''
f'''{losses['anchor'].item():>10.2f} '''
f'''{total_loss.item():>10.2f}''')
flog.write(
f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
f'''{'validation' if is_valdt else 'training':^10s} '''
f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
f'''{lr:>5.2E} '''
f'''{losses['box'].item():>11.2f} '''
f'''{(losses['leaf']+losses['exists']+losses['semantic']).item():>11.2f} '''
f'''{losses['edge_exists'].item():>11.2f} '''
f'''{losses['kldiv'].item():>10.2f} '''
f'''{losses['sym'].item():>10.2f} '''
f'''{losses['adj'].item():>10.2f} '''
f'''{losses['anchor'].item():>10.2f} '''
f'''{total_loss.item():>10.2f}\n''')
flog.flush()
# log to tensorboard
if log_tb and tb_writer is not None:
tb_writer.add_scalar('loss', total_loss.item(), step)
tb_writer.add_scalar('lr', lr, step)
tb_writer.add_scalar('box_loss', losses['box'].item(), step)
tb_writer.add_scalar('anchor_loss', losses['anchor'].item(), step)
tb_writer.add_scalar('leaf_loss', losses['leaf'].item(), step)
tb_writer.add_scalar('exists_loss', losses['exists'].item(), step)
tb_writer.add_scalar('semantic_loss', losses['semantic'].item(), step)
tb_writer.add_scalar('edge_exists_loss', losses['edge_exists'].item(), step)
tb_writer.add_scalar('kldiv_loss', losses['kldiv'].item(), step)
tb_writer.add_scalar('sym_loss', losses['sym'].item(), step)
tb_writer.add_scalar('adj_loss', losses['adj'].item(), step)
return total_loss
if __name__ == '__main__':
sys.setrecursionlimit(5000) # this code uses recursion a lot for code simplicity
parser = ArgumentParser()
parser = add_train_vae_args(parser)
config = parser.parse_args()
Tree.load_category_info(config.category)
train(config)
|
the-stack_0_17900 | import hashlib
import aiofiles
import pytest
from aiofiles import os
from kanp.download import Downloader
from kanp.utils import get_real_download_url, get_video_info
@pytest.mark.asyncio
async def test_download():
url = "https://raw.githubusercontent.com/long2ice/kanp/dev/.dockerignore"
file = ".dockerignore.txt"
async with aiofiles.open(file, "ab+") as f:
async with Downloader(url,) as downloader:
async for block in downloader:
await f.write(block)
async with aiofiles.open(file, "rb") as f:
assert hashlib.md5(await f.read()).hexdigest() == "c59066fc1c16d900c6c9275c5f4a1757"
await os.remove(file)
@pytest.mark.asyncio
async def test_get_video_info():
url = "https://cn.pornhub.com/view_video.php?viewkey=ph5efb2f208eadc"
url = get_real_download_url(url)
content_length, content_type = await get_video_info(url)
assert (content_length, content_type) == (792786244, "video/mp4")
|
the-stack_0_17902 | # chat/tests.py
from channels.testing import ChannelsLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
class ChatTests(ChannelsLiveServerTestCase):
serve_static = True # emulate StaticLiveServerTestCase
@classmethod
def setUpClass(cls):
super().setUpClass()
try:
# NOTE: Requires "chromedriver" binary to be installed in $PATH
cls.driver = webdriver.Chrome()
except:
super().tearDownClass()
raise
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_when_chat_message_posted_then_seen_by_everyone_in_same_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_1')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 2 from window 1')
finally:
self._close_all_new_windows()
def test_when_chat_message_posted_then_not_seen_by_anyone_in_different_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_2')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
self._post_message('world')
WebDriverWait(self.driver, 2).until(lambda _:
'world' in self._chat_log_value,
'Message was not received by window 2 from window 2')
self.assertTrue('hello' not in self._chat_log_value,
'Message was improperly received by window 2 from window 1')
finally:
self._close_all_new_windows()
# === Utility ===
def _enter_chat_room(self, room_name):
self.driver.get(self.live_server_url + '/chat/')
ActionChains(self.driver).send_keys(room_name + '\n').perform()
WebDriverWait(self.driver, 2).until(lambda _:
room_name in self.driver.current_url)
def _open_new_window(self):
self.driver.execute_script('window.open("about:blank", "_blank");')
self.driver.switch_to_window(self.driver.window_handles[-1])
def _close_all_new_windows(self):
while len(self.driver.window_handles) > 1:
self.driver.switch_to_window(self.driver.window_handles[-1])
self.driver.execute_script('window.close();')
if len(self.driver.window_handles) == 1:
self.driver.switch_to_window(self.driver.window_handles[0])
def _switch_to_window(self, window_index):
self.driver.switch_to_window(self.driver.window_handles[window_index])
def _post_message(self, message):
ActionChains(self.driver).send_keys(message + '\n').perform()
@property
def _chat_log_value(self):
return self.driver.find_element_by_css_selector('#chat-log').get_property('value')
|
the-stack_0_17903 | # Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Transformer decoder.
"""
import warnings
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from texar.torch.core import layers
from texar.torch.modules.decoders.decoder_base import (
DecoderBase, TokenEmbedder, TokenPosEmbedder, _make_output_layer)
from texar.torch.modules.decoders.decoder_helpers import (
EmbeddingHelper, Helper)
from texar.torch.modules.encoders.multihead_attention import (
Cache, MultiheadAttentionEncoder)
from texar.torch.modules.encoders.transformer_encoder import (
default_transformer_poswise_net_hparams)
from texar.torch.modules.networks.networks import FeedForwardNetwork
from texar.torch.utils import transformer_attentions as attn
from texar.torch.utils.beam_search import beam_search
from texar.torch.utils.shapes import mask_sequences
from texar.torch.utils.utils import sequence_mask
__all__ = [
'TransformerDecoderOutput',
'TransformerDecoder',
]
EmbeddingFn = Callable[[torch.LongTensor, torch.LongTensor], torch.Tensor]
class TransformerDecoderOutput(NamedTuple):
r"""The output of :class:`TransformerDecoder`.
"""
logits: torch.Tensor
r"""A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``
containing the logits."""
sample_id: torch.LongTensor
r"""A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing
the sampled token indices."""
class TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):
r"""Transformer decoder that applies multi-head self-attention for
sequence decoding.
It is a stack of
:class:`~texar.torch.modules.encoders.MultiheadAttentionEncoder`,
:class:`~texar.torch.modules.FeedForwardNetwork`, and residual connections.
Args:
token_embedder: An instance of :torch_nn:`Module`, or a function taking
a :tensor:`LongTensor` ``tokens`` as argument. This is the embedder
called in :meth:`embed_tokens` to convert input tokens to
embeddings.
token_pos_embedder: An instance of :torch_nn:`Module`, or a function
taking two :tensor:`LongTensor`\ s ``tokens`` and ``positions`` as
argument. This is the embedder called in :meth:`embed_tokens` to
convert input tokens with positions to embeddings.
.. note::
Only one among :attr:`token_embedder` and
:attr:`token_pos_embedder` should be specified. If neither is
specified, you must subclass :class:`TransformerDecoder` and
override :meth:`embed_tokens`.
vocab_size (int, optional): Vocabulary size. Required if
:attr:`output_layer` is `None`.
output_layer (optional): An output layer that transforms cell output
to logits. This can be:
- A callable layer, e.g., an instance of :torch_nn:`Module`.
- A tensor. A :torch_nn:`Linear` layer will be created using the
tensor as weights. The bias of the dense layer is determined
by ``hparams.output_layer_bias``. This can be used to tie the
output layer with the input embedding matrix, as proposed in
https://arxiv.org/pdf/1608.05859.pdf.
- `None`. A :torch_nn:`Linear` layer will be created based on
:attr:`vocab_size` and ``hparams.output_layer_bias``.
- If no output layer is needed at the end, set
:attr:`vocab_size` to `None` and ``output_layer`` to
:func:`~texar.torch.core.identity`.
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
# State variables used during `dynamic_decode`. Assigned in `forward`.
_state_max_decoding_length: int
_state_context: Optional[torch.LongTensor]
_state_context_sequence_length: Optional[torch.LongTensor]
_state_cache: Cache
def __init__(self,
token_embedder: Optional[TokenEmbedder] = None,
token_pos_embedder: Optional[TokenPosEmbedder] = None,
vocab_size: Optional[int] = None,
output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,
hparams=None):
super().__init__(
token_embedder, token_pos_embedder,
input_time_major=False, output_time_major=False, hparams=hparams)
if token_pos_embedder is None and token_embedder is not None:
warnings.warn(
"Transformer models cannot capture positional information if "
"no positional embedding is provided.")
self._input_size = self._hparams.dim
self._output_layer, self._vocab_size = _make_output_layer(
output_layer, vocab_size, self._input_size,
self._hparams.output_layer_bias)
self.self_attns = nn.ModuleList()
self.self_attn_layer_norm = nn.ModuleList()
self.enc_dec_attns = nn.ModuleList()
self.end_dec_attn_layer_norm = nn.ModuleList()
self.poswise_networks = nn.ModuleList()
self.poswise_layer_norm = nn.ModuleList()
if self._hparams.use_gpt_config:
eps = 1e-5
else:
eps = 1e-12
for _ in range(self._hparams.num_blocks):
attn_module = MultiheadAttentionEncoder(
self._input_size, self._hparams.multihead_attention)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadEncoder should be equal "
"to the dim of TransformerDecoder")
self.self_attns.append(attn_module)
self.self_attn_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
attn_module = MultiheadAttentionEncoder(
self._input_size, self._hparams.multihead_attention)
if self._hparams.dim != attn_module.output_size:
raise ValueError("The output dimension of "
"MultiheadEncoder should be equal "
"to the dim of TransformerDecoder")
self.enc_dec_attns.append(attn_module)
self.end_dec_attn_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
poswise_network = FeedForwardNetwork(
hparams=self._hparams.poswise_feedforward)
if (poswise_network.hparams.layers[-1]['kwargs']['out_features']
!= self._hparams.dim):
raise ValueError("The output dimension of "
"FeedForwardNetwork should be equal "
"to the dim of TransformerDecoder")
self.poswise_networks.append(poswise_network)
self.poswise_layer_norm.append(
nn.LayerNorm(self._input_size, eps=eps))
self.final_layer_norm = nn.LayerNorm(self._input_size, eps=eps)
self.embed_dropout = nn.Dropout(self._hparams.embedding_dropout)
self.residual_dropout = nn.Dropout(self._hparams.residual_dropout)
if self._hparams.initializer:
# TODO: This might be different to what TensorFlow does
initialize = layers.get_initializer(self._hparams.initializer)
assert initialize is not None
# Do not re-initialize LayerNorm modules.
for name, param in self.named_parameters():
if name.split(".")[-1] == "weight" and "layer_norm" not in name:
initialize(param)
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
# Same as in TransformerEncoder
"num_blocks": 6,
"dim": 512,
"use_gpt_config": False,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"poswise_feedforward": default_transformer_poswise_net_hparams,
"multihead_attention": {
'name': 'multihead_attention',
'num_units': 512,
'output_dim': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'use_bias': False,
},
"initializer": None,
"name": "transformer_decoder"
# Additional for TransformerDecoder
"embedding_tie": True,
"output_layer_bias": False,
"max_decoding_length": int(1e10),
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoder.
`"use_gpt_config"`: bool
Whether to follow the `eps` setting of OpenAI GPT.
`"embedding_dropout"`: float
Dropout rate of the input word and position embeddings.
`"residual_dropout"`: float
Dropout rate of the residual connections.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``dim``.
See
:func:`~texar.torch.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``output_dim`` in this module is equal to ``dim``.
See :class:`~texar.torch.modules.MultiheadAttentionEncoder`
for details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~texar.torch.core.get_initializer` for details.
`"embedding_tie"`: bool
Whether to use the word embedding matrix as the output layer
that computes logits. If `False`, a new dense layer is created.
`"output_layer_bias"`: bool
Whether to use bias to the output layer.
`"max_decoding_length"`: int
The maximum allowed number of decoding steps.
            Set to a very large number to avoid the length constraint.
Ignored if provided in :meth:`forward` or ``"train_greedy"``
decoding is used.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'use_gpt_config': False,
'embedding_tie': True,
'output_layer_bias': False,
'max_decoding_length': int(1e10),
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
},
'initializer': None,
'name': "transformer_decoder",
}
def _inputs_to_outputs(self, inputs: torch.Tensor,
cache: Cache) -> Tuple[torch.Tensor, Cache]:
r"""Returns the outputs of one decoding step (for example,
the predicted logits of the next token).
:attr:`inputs` should be of shape ``[batch_size, dim]``.
Returns:
A tuple of logits and updated cache. Logits are of shape
``[batch_size, vocab_size]``.
"""
outputs = self._self_attention_stack(
inputs.unsqueeze(1), memory=cache['memory'], cache=cache)
outputs = self._output_layer(outputs)
outputs = outputs.squeeze(1)
return outputs, cache
def forward(self, # type: ignore
inputs: Optional[torch.Tensor] = None,
sequence_length: Optional[torch.LongTensor] = None,
memory: Optional[torch.Tensor] = None,
memory_sequence_length: Optional[torch.LongTensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
context: Optional[torch.Tensor] = None,
context_sequence_length: Optional[torch.LongTensor] = None,
helper: Optional[Helper] = None,
decoding_strategy: str = 'train_greedy',
max_decoding_length: Optional[int] = None,
impute_finished: bool = False,
infer_mode: Optional[bool] = None,
beam_width: Optional[int] = None,
length_penalty: float = 0.,
**kwargs) \
-> Union[
TransformerDecoderOutput,
Tuple[TransformerDecoderOutput, torch.LongTensor],
Dict[str, torch.Tensor]]:
r"""Performs decoding.
The interface is very similar to that of RNN decoders
(:class:`texar.torch.modules.RNNDecoderBase`). In particular,
the function provides **3 ways** to specify the decoding method, with
varying flexibility:
1. The :attr:`decoding_strategy` argument.
- **"train_greedy"**: decoding in teacher-forcing fashion (i.e.,
feeding ground truth to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Argument :attr:`inputs` is required for this strategy.
:attr:`sequence_length` is optional.
- **"infer_greedy"**: decoding in inference fashion (i.e., feeding
`generated` sample to decode the next step), and for each step
sample is obtained by taking the `argmax` of logits.
Arguments :attr:`(start_tokens, end_token)` are
required for this strategy, and argument
:attr:`max_decoding_length` is optional.
- **"infer_sample"**: decoding in inference fashion, and for each
step sample is obtained by `random sampling` from the logits.
Arguments :attr:`(start_tokens, end_token)` are required for this
strategy, and argument :attr:`max_decoding_length` is optional.
This argument is used only when arguments :attr:`helper` and
:attr:`beam_width` are both `None`.
2. The :attr:`helper` argument: An instance of subclass of
:class:`texar.torch.modules.decoders.Helper`.
This provides a superset of decoding strategies than above.
The interface is the same as in RNN decoders.
Please refer to :meth:`texar.torch.modules.RNNDecoderBase.forward`
for detailed usage and examples.
Note that, here, though using a
:class:`~texar.torch.decoder.TrainingHelper` corresponding to the
``"train_greedy"`` strategy above, the implementation is *slower*
than directly setting ``decoding_strategy="train_greedy"`` (though
output results are the same).
Argument :attr:`max_decoding_length` is optional.
3. **Beam search**: set :attr:`beam_width` to use beam search decoding.
Arguments :attr:`(start_tokens, end_token)` are required,
and argument :attr:`max_decoding_length` is optional.
Args:
memory (optional): The memory to attend, e.g., the output of an RNN
encoder. A :tensor:`Tensor` of shape
``[batch_size, memory_max_time, dim]``.
memory_sequence_length (optional): A :tensor:`Tensor` of shape
``[batch_size]`` containing the sequence lengths for the batch
entries in memory. Used to create attention bias of
:attr:`memory_attention_bias` is not given. Ignored if
:attr:`memory_attention_bias` is provided.
memory_attention_bias (optional): A :tensor:`Tensor` of shape
``[batch_size, num_heads, memory_max_time, dim]``.
An attention bias typically sets the value of a padding
position to a large negative value for masking. If not given,
:attr:`memory_sequence_length` is used to automatically
create an attention bias.
inputs (optional): Input tensors for teacher forcing decoding.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``, or when `hparams`-configured helper is used.
                The :attr:`inputs` is a :tensor:`LongTensor` used as index to
look up embeddings and feed in the decoder. For example, if
:attr:`embedder` is an instance of
:class:`~texar.torch.modules.WordEmbedder`, then :attr:`inputs`
is usually a 2D int Tensor `[batch_size, max_time]` (or
`[max_time, batch_size]` if `input_time_major` == `True`)
containing the token indexes.
sequence_length (optional): A :tensor:`LongTensor` of shape
``[batch_size]``, containing the sequence length of
:attr:`inputs`. Tokens beyond the respective sequence length are
masked out.
Used when :attr:`decoding_strategy` is set to
``"train_greedy"``.
decoding_strategy (str): A string specifying the decoding
strategy, including ``"train_greedy"``, ``"infer_greedy"``,
``"infer_sample"``.
Different arguments are required based on the
strategy. See above for details. Ignored if
:attr:`beam_width` or :attr:`helper` is set.
beam_width (int): Set to use beam search. If given,
:attr:`decoding_strategy` is ignored.
length_penalty (float): Length penalty coefficient used in beam
search decoding. Refer to https://arxiv.org/abs/1609.08144
for more details.
It should be larger if longer sentences are desired.
context (optional): An :tensor:`LongTensor` of shape
``[batch_size, length]``, containing the starting tokens for
decoding. If context is set, ``start_tokens`` of the
:class:`~texar.torch.modules.Helper` will be ignored.
context_sequence_length (optional): Specify the length of context.
max_decoding_length (int, optional): The maximum allowed number of
decoding steps.
If `None` (default), use ``"max_decoding_length"`` defined in
:attr:`hparams`. Ignored in ``"train_greedy"`` decoding.
impute_finished (bool): If `True`, then states for batch
entries which are marked as finished get copied through and
the corresponding outputs get zeroed out. This causes some
slowdown at each time step, but ensures that the final state
and outputs have the correct values and that backprop ignores
time steps that were marked as finished. Ignored in
``"train_greedy"`` decoding.
helper (optional): An instance of
:class:`texar.torch.modules.decoders.Helper`
that defines the decoding strategy. If given,
``decoding_strategy`` and helper configurations in
:attr:`hparams` are ignored.
infer_mode (optional): If not `None`, overrides mode given by
:attr:`self.training`.
Returns:
- For **"train_greedy"** decoding, returns an instance of
:class:`~texar.torch.modules.TransformerDecoderOutput` which
contains `sample_id` and `logits`.
- For **"infer_greedy"** and **"infer_sample"** decoding or
decoding with :attr:`helper`, returns
a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an
instance of :class:`~texar.torch.modules.TransformerDecoderOutput`
as in `"train_greedy"`, and ``sequence_lengths`` is a
:tensor:`LongTensor` of shape ``[batch_size]`` containing the
length of each sample.
- For **beam search** decoding, returns a ``dict`` containing keys
``"sample_id"`` and ``"log_prob"``.
- ``"sample_id"`` is a :tensor:`LongTensor` of shape
``[batch_size, max_time, beam_width]`` containing generated
token indexes. ``sample_id[:,:,0]`` is the highest-probable
sample.
- ``"log_prob"`` is a :tensor:`Tensor` of shape
``[batch_size, beam_width]`` containing the log probability
of each sequence sample.
"""
if memory is not None:
if memory_attention_bias is None:
if memory_sequence_length is None:
raise ValueError(
"`memory_sequence_length` is required if "
"`memory_attention_bias` is not given.")
enc_padding = 1 - sequence_mask(
memory_sequence_length, memory.size(1),
dtype=torch.float32)
memory_attention_bias = attn.attention_bias_ignore_padding(
enc_padding)
# record the context, which will be used in step function
# for dynamic_decode
if context is not None:
if context_sequence_length is None:
raise ValueError("'context_sequence_length' must not be None"
"when 'context' is specified.")
self._state_context = context[:, 1:]
self._state_context_sequence_length = context_sequence_length - 1
else:
self._state_context = None
self._state_context_sequence_length = None
# Faster code path for teacher-forcing training
if (helper is None and beam_width is None and
decoding_strategy == 'train_greedy'):
if inputs is None:
raise ValueError("'input' must not be none "
"when using 'train_greedy' decoding strategy.")
times = torch.arange(
inputs.size(1), dtype=torch.long, device=inputs.device)
times = times.unsqueeze(0).expand(inputs.size(0), -1)
inputs = self.embed_tokens(inputs, times)
if sequence_length is not None:
inputs = mask_sequences(inputs, sequence_length)
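            # causal (lower-triangular) bias: each position may only attend to
            # itself and earlier positions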
decoder_self_attention_bias = (
attn.attention_bias_lower_triangle(inputs.size(1)))
decoder_output = self._self_attention_stack(
inputs, memory, decoder_self_attention_bias,
memory_attention_bias, cache=None)
logits = self._output_layer(decoder_output)
sample_id = torch.argmax(logits, dim=-1)
return TransformerDecoderOutput(logits, sample_id)
# Inference code path.
if max_decoding_length is None:
max_decoding_length = self._hparams.max_decoding_length
self._state_max_decoding_length = max_decoding_length
if beam_width is None or beam_width == 1: # Inference-like decoding
# Prepare helper
if helper is None:
kwargs.update(decoding_strategy=decoding_strategy)
if context is not None:
kwargs.update(start_tokens=context[:, 0])
helper = self._create_or_get_helper(infer_mode, **kwargs)
assert isinstance(helper, EmbeddingHelper)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=False, batch_size=helper.batch_size)
if context is not None:
assert self._state_context is not None
pad_length = max_decoding_length - self._state_context.size(1)
if pad_length > 0:
self._state_context = torch.cat((
self._state_context,
self._state_context.new_zeros(
self._state_context.size(0), pad_length)
), dim=1)
outputs, cache, sequence_lengths = self.dynamic_decode(
helper, inputs=None, sequence_length=None,
initial_state=None, max_decoding_length=max_decoding_length,
impute_finished=impute_finished)
del cache # not used
if context is not None:
                # Here the length of sample_id will be larger than that
                # of logits by 1, because there is an additional
                # start_token in the returned sample_id.
                # The start_id should be the first token of the
                # given context.
start_tokens = context[:, 0]
outputs = TransformerDecoderOutput(
logits=outputs.logits,
sample_id=torch.cat([
start_tokens.unsqueeze(1),
outputs.sample_id
], dim=1))
sequence_lengths = sequence_lengths + 1
return outputs, sequence_lengths
else: # Beam-search decoding
            # Ignore `decoding_strategy`; assume `helper` is not set.
if helper is not None:
raise ValueError("Must not set 'beam_width' and 'helper' "
"simultaneously.")
if context is not None:
start_tokens = context[:, 0]
else:
if 'start_tokens' not in kwargs:
                    raise ValueError(
                        "'start_tokens' must be specified when using "
                        "beam search decoding.")
start_tokens = kwargs['start_tokens']
_batch_size = start_tokens.size(0)
self._state_cache = self._init_cache(
memory, memory_attention_bias,
beam_search_decoding=True,
batch_size=_batch_size)
end_token: int = kwargs.get('end_token') # type: ignore
# The output format is different when running beam search.
sample_id, log_prob = self.beam_decode(
start_tokens,
end_token,
embedding_fn=self.embed_tokens,
beam_width=beam_width,
length_penalty=length_penalty,
decode_length=max_decoding_length)
return {
'sample_id': sample_id,
'log_prob': log_prob
}
def _self_attention_stack(
self, inputs: torch.Tensor,
memory: Optional[torch.Tensor],
decoder_self_attention_bias: Optional[torch.Tensor] = None,
memory_attention_bias: Optional[torch.Tensor] = None,
cache: Optional[Cache] = None) -> torch.Tensor:
r"""Forward through the stacked multi-head attentions.
"""
inputs = self.embed_dropout(inputs)
if cache is not None:
if memory is not None:
memory_attention_bias = cache['memory_attention_bias']
else:
assert decoder_self_attention_bias is not None
x = inputs
for i in range(self._hparams.num_blocks):
layer_cache = cache['layers'][i] if cache is not None else None
selfatt_output = self.self_attns[i](
queries=self.self_attn_layer_norm[i](x),
memory=None,
memory_attention_bias=decoder_self_attention_bias,
cache=layer_cache)
x = x + self.residual_dropout(selfatt_output)
if memory is not None:
encdec_output = self.enc_dec_attns[i](
queries=self.end_dec_attn_layer_norm[i](x),
memory=memory,
memory_attention_bias=memory_attention_bias)
x = x + self.residual_dropout(encdec_output)
sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))
x = x + self.residual_dropout(sub_output)
return self.final_layer_norm(x)
def _init_cache(self, memory: Optional[torch.Tensor],
memory_attention_bias: Optional[torch.Tensor],
beam_search_decoding: bool,
batch_size: int) -> Cache:
r"""Returns an initialized cache.
In order to support both inference-like decoding and beam-search
decoding, the elements of each layer must be initialized and extended
as different structure respectively. Specifically, for inference-like
decoding, a simple list is used; for beam-search decoding, a
:tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``
is maintained, where ``current_steps`` is the number of steps currently
decoded.
"""
device = next(self.parameters()).device
def _create_ta():
return []
def _create_empty_tensor():
ret = torch.zeros(
batch_size, 0, self._hparams.multihead_attention.num_units,
dtype=torch.float, device=device)
return ret
_create_fn = (_create_empty_tensor if beam_search_decoding
else _create_ta)
cache: Cache = {
'memory': memory,
'memory_attention_bias': memory_attention_bias,
'layers': [{
'keys': _create_fn(),
'values': _create_fn(),
} for _ in range(self._hparams.num_blocks)],
}
return cache
def beam_decode(self, start_tokens: torch.LongTensor, end_token: int,
embedding_fn: Callable[
[torch.LongTensor, torch.LongTensor], torch.Tensor],
decode_length: int = 256, beam_width: int = 5,
length_penalty: float = 0.6) \
-> Tuple[torch.Tensor, torch.Tensor]:
def _symbols_to_logits_fn(ids, cache):
batch_size = ids.size(0)
step = ids.size(-1) - 1
times = ids.new_full((batch_size,), step)
inputs = embedding_fn(ids[:, -1], times)
return self._inputs_to_outputs(inputs, cache)
assert self._vocab_size is not None
outputs, log_prob = beam_search(
_symbols_to_logits_fn,
start_tokens,
beam_width,
decode_length,
self._vocab_size,
length_penalty,
states=self._state_cache,
eos_id=end_token)
# Ignores <BOS>
outputs = outputs[:, :, 1:]
# shape = [batch_size, seq_length, beam_width]
outputs = outputs.permute(0, 2, 1)
return outputs, log_prob
@property
def output_size(self) -> int:
r"""Output size of one step.
"""
return self._input_size
def initialize(self, helper: Helper, inputs: Optional[torch.Tensor],
sequence_length: Optional[torch.LongTensor],
initial_state: Optional[Cache]) \
-> Tuple[torch.ByteTensor, torch.Tensor, Cache]:
initial_finished, initial_inputs = helper.initialize(
self.embed_tokens, inputs, sequence_length)
state = initial_state or self._state_cache
return initial_finished, initial_inputs, state
def step(self, helper: Helper, time: int,
inputs: torch.Tensor, state: Optional[Cache]) \
-> Tuple[TransformerDecoderOutput, Cache,
torch.Tensor, torch.ByteTensor]:
assert state is not None
outputs, state = self._inputs_to_outputs(inputs, state)
sample_ids = helper.sample(time=time, outputs=outputs)
if self._state_context is not None:
assert self._state_context_sequence_length is not None
sample_ids = torch.where(
self._state_context_sequence_length > time,
self._state_context[:, time],
sample_ids)
if time + 1 == self._state_max_decoding_length:
# Maximum decoding length reached, mark all batches as finished.
# This requires special handling because performing lookup on
# position embeddings with `time + 1` may result in IndexError.
finished = torch.ones_like(sample_ids, dtype=torch.uint8)
# Since `next_inputs` will not be used, simply create a null tensor.
next_inputs = torch.empty(0)
else:
finished, next_inputs = helper.next_inputs(
self.embed_tokens, time, outputs, sample_ids)
next_state = state
outputs = TransformerDecoderOutput(
logits=outputs,
sample_id=sample_ids)
return outputs, next_state, next_inputs, finished
def finalize(self, # type: ignore
outputs: TransformerDecoderOutput,
final_state: Optional[Cache],
sequence_lengths: torch.LongTensor) \
-> Tuple[TransformerDecoderOutput, Optional[Cache]]:
# Clear state variables at end of decoding.
del self._state_max_decoding_length
del self._state_context
del self._state_context_sequence_length
del self._state_cache
return super().finalize(outputs, final_state, sequence_lengths)
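# A minimal usage sketch (not part of Texar itself): hyperparameters take their
# defaults (dim=512), the embedder below is untrained, and the tensors are random,
# so this only illustrates expected input/output shapes for "train_greedy" decoding.
if __name__ == "__main__":
    vocab_size, batch_size, max_time = 100, 2, 7
    embedder = nn.Embedding(vocab_size, 512)
    decoder = TransformerDecoder(token_embedder=embedder, vocab_size=vocab_size)
    tokens = torch.randint(0, vocab_size, (batch_size, max_time))
    lengths = torch.tensor([max_time, max_time - 2])
    outputs = decoder(inputs=tokens, sequence_length=lengths,
                      decoding_strategy='train_greedy')
    print(outputs.logits.shape)     # expected: torch.Size([2, 7, 100])
    print(outputs.sample_id.shape)  # expected: torch.Size([2, 7])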
|
the-stack_0_17904 | import grama as gr
import numpy as np
## Load data for RV model
from grama.data import df_stang
## Functions
def fun_critical(x):
E, mu, t, h = x
return np.pi ** 2 * E / 12 / (1 - mu ** 2) * (t / h) ** 2
var_critical = ["E", "mu", "t", "h"]
out_critical = ["sig_cr"]
def fun_applied(x):
L, w, t = x
return L / w / t
var_applied = ["L", "w", "t"]
out_applied = ["sig_app"]
def fun_limit(x):
sig_cr, sig_app = x
return sig_cr - sig_app
var_limit = ["sig_cr", "sig_app"]
out_limit = ["safety"]
## Build model
md_plate = (
gr.Model("Plate under buckling load")
>> gr.cp_function(
fun=fun_critical, var=var_critical, out=out_critical, name="Critical"
)
>> gr.cp_function(fun=fun_applied, var=var_applied, out=out_applied, name="Applied")
>> gr.cp_function(fun=fun_limit, var=var_limit, out=out_limit, name="Safety")
>> gr.cp_bounds( # Deterministic variables
t=(0.03, 0.12), # Thickness
w=(6, 18), # Width
h=(6, 18), # Height
L=(2.5e-1, 4.0e-1), # Load
)
>> gr.cp_marginals( # Random variables
E=gr.marg_gkde(df_stang.E), mu=gr.marg_gkde(df_stang.mu)
)
>> gr.cp_copula_gaussian(df_data=df_stang)
) # Dependence
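# A quick hand check (sketch, not part of the grama example itself): the inputs
# below are illustrative nominal values in the units implied by df_stang
# (E in ksi, lengths in inches), chosen only to show how the three functions chain.
if __name__ == "__main__":
    sig_cr = fun_critical([10.5e3, 0.32, 0.06, 12.0])  # E, mu, t, h
    sig_app = fun_applied([0.3, 12.0, 0.06])           # L, w, t
    print("sig_cr =", sig_cr, "sig_app =", sig_app,
          "safety =", fun_limit([sig_cr, sig_app]))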
|
the-stack_0_17905 | from Xdmf import *
if __name__ == "__main__":
primaryDomain = XdmfDomain.New()
testGrid = XdmfUnstructuredGrid.New()
primaryDomain.insert(testGrid)
testGeometry = XdmfGeometry.New()
for i in range (0, 11):
testGeometry.pushBackAsInt32(i);
testGrid.setGeometry(testGeometry)
testTopology = XdmfTopology.New()
testGrid.setTopology(testTopology)
arrayWriter = XdmfWriter.New("array.xmf")
primaryDomain.accept(arrayWriter)
|
the-stack_0_17906 | import matplotlib.pyplot as plt
import numpy as np
from plotData import *
from mapFeature import *
def plot_decision_boundary(theta, X, y):
plot_data(X[:, 1:3], y)
if X.shape[1] <= 3:
# Only need two points to define a line, so choose two endpoints
plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2])
# Calculate the decision boundary line
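        # theta0 + theta1*x1 + theta2*x2 = 0  =>  x2 = -(theta1*x1 + theta0) / theta2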
plot_y = (-1/theta[2]) * (theta[1]*plot_x + theta[0])
plt.plot(plot_x, plot_y)
plt.legend(['Decision Boundary', 'Admitted', 'Not admitted'], loc=1)
plt.axis([30, 100, 30, 100])
else:
# Here is the grid range
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
z = np.zeros((u.size, v.size))
# Evaluate z = theta*x over the grid
for i in range(0, u.size):
for j in range(0, v.size):
z[i, j] = np.dot(map_feature(u[i], v[j]), theta)
z = z.T
# Plot z = 0
# Notice you need to specify the range [0, 0]
cs = plt.contour(u, v, z, levels=[0], colors='r')
plt.legend([cs.collections[0]], ['Decision Boundary'])
plt.show() |
the-stack_0_17907 | import fcntl
import logging
import multiprocessing as mp
import os
import signal
import threading
import time
import conftest
import ophyd
import pytest
from pcdsdevices.interface import (BaseInterface, TabCompletionHelperClass,
get_engineering_mode, set_engineering_mode,
setup_preset_paths)
from pcdsdevices.sim import FastMotor, SlowMotor
logger = logging.getLogger(__name__)
@pytest.fixture(scope='function')
def slow_motor():
return SlowMotor(name='sim_slow')
@pytest.fixture(scope='function')
def fast_motor():
return FastMotor(name='sim_fast')
@pytest.mark.timeout(5)
def test_mv(fast_motor):
logger.debug('test_mv')
fast_motor(3, wait=True)
assert fast_motor.wm() == 3
fast_motor.mvr(1, wait=True)
assert fast_motor() == 4
@pytest.mark.timeout(5)
def test_umv(slow_motor):
logger.debug('test_umv')
start_position = slow_motor.position
delta = 2
slow_motor.umvr(delta)
assert slow_motor.position == start_position + delta
def test_camonitor(fast_motor):
logger.debug('test_camonitor')
pid = os.getpid()
def interrupt():
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
threading.Thread(target=interrupt, args=()).start()
fast_motor.camonitor()
def test_mv_ginput(monkeypatch, fast_motor):
logger.debug('test_mv_ginput')
# Importing forces backend selection, so do inside method
from matplotlib import pyplot as plt # NOQA
def fake_plot(*args, **kwargs):
return
def fake_ginput(*args, **kwargs):
return [[12, 24]]
def fake_get_fignums(*args, **kwargs):
return local_get_fignums
monkeypatch.setattr(plt, 'plot', fake_plot)
monkeypatch.setattr(plt, 'ginput', fake_ginput)
monkeypatch.setattr(plt, 'get_fignums', fake_get_fignums)
def inner_test():
fast_motor.mv_ginput()
assert fast_motor.position == 12
fast_motor.move(0)
assert fast_motor.position == 0
local_get_fignums = True
inner_test()
local_get_fignums = False
inner_test()
fast_motor._limits = (-100, 100)
inner_test()
def test_presets(presets, fast_motor):
logger.debug('test_presets')
fast_motor.mv(4, wait=True)
fast_motor.presets.add_hutch('four', comment='four!')
fast_motor.mv(3, wait=True)
fast_motor.presets.add_hutch('zero', 0, comment='center')
fast_motor.presets.add_here_user('sample')
print(fast_motor.presets.positions)
assert fast_motor.wm_zero() == -3
assert fast_motor.wm_sample() == 0
assert fast_motor.wm_four() == 1
# Clear paths, refresh, should still exist
old_paths = fast_motor.presets._paths
setup_preset_paths()
assert not hasattr(fast_motor, 'wm_zero')
setup_preset_paths(**old_paths)
assert fast_motor.wm_zero() == -3
assert fast_motor.wm_sample() == 0
fast_motor.mv_zero(wait=True)
fast_motor.mvr(1, wait=True)
assert fast_motor.wm_zero() == -1
assert fast_motor.wm() == 1
    # Sleep for one second so we don't override old history
time.sleep(1)
fast_motor.presets.positions.zero.update_pos(comment='hats')
assert fast_motor.wm_zero() == 0
assert fast_motor.presets.positions.zero.pos == 1
assert len(fast_motor.presets.positions.zero.history) == 2
assert len(fast_motor.presets.positions.sample.history) == 1
repr(fast_motor.presets.positions.zero)
fast_motor.presets.positions.zero.deactivate()
with pytest.raises(AttributeError):
fast_motor.wm_zero()
with pytest.raises(AttributeError):
fast_motor.presets.positions.zero
fast_motor.umv_sample()
assert fast_motor.wm() == 3
fast_motor.presets.positions.sample.update_comment('hello there')
assert len(fast_motor.presets.positions.sample.history) == 2
def block_file(path, lock):
with open(path, 'r+') as f:
fcntl.flock(f, fcntl.LOCK_EX)
lock.acquire()
fcntl.flock(f, fcntl.LOCK_UN)
path = fast_motor.presets.positions.sample.path
lock = mp.Lock()
with lock:
proc = mp.Process(target=block_file, args=(path, lock))
proc.start()
time.sleep(0.2)
assert fast_motor.presets.positions.sample.pos == 3
fast_motor.presets.positions.sample.update_pos(2)
assert not hasattr(fast_motor, 'wm_sample')
fast_motor.presets.sync()
assert not hasattr(fast_motor, 'mv_sample')
proc.join()
fast_motor.presets.sync()
assert hasattr(fast_motor, 'mv_sample')
def test_presets_type(presets, fast_motor):
logger.debug('test_presets_type')
# Mess up the input types, fail before opening the file
with pytest.raises(TypeError):
fast_motor.presets.add_here_user(123)
with pytest.raises(TypeError):
fast_motor.presets.add_user(234234, 'cats')
def test_engineering_mode():
logger.debug('test_engineering_mode')
set_engineering_mode(False)
assert not get_engineering_mode()
set_engineering_mode(True)
assert get_engineering_mode()
def test_dir_whitelist_basic(fast_motor):
logger.debug('test_dir_whitelist_basic')
set_engineering_mode(False)
user_dir = dir(fast_motor)
set_engineering_mode(True)
eng_dir = dir(fast_motor)
assert len(eng_dir) > len(user_dir)
_TAB_COMPLETION_IGNORES = {'.areadetector.', }
def _should_check_tab_completion(cls):
"""Filter out classes for checking tab completion."""
if BaseInterface in cls.mro():
# Include any Devices that have BaseInterface
return True
fully_qualified_name = f'{cls.__module__}.{cls.__name__}'
if any(name in fully_qualified_name for name in _TAB_COMPLETION_IGNORES):
# This doesn't mix BaseInterface in, but that's OK - it's on our list
return False
# This doesn't mix BaseInterface in, this may be a bad thing: warn in
# the test.
return True
@pytest.mark.parametrize(
'cls',
[pytest.param(cls, id=f'{cls.__module__}.{cls.__name__}')
for cls in conftest.find_all_device_classes()
if _should_check_tab_completion(cls)]
)
def test_tab_completion(cls):
if BaseInterface not in cls.mro():
pytest.skip(f'{cls} does not inherit from the interface')
regex = cls._class_tab.build_regex()
if getattr(cls, 'tab_component_names', False):
for name in cls.component_names:
if getattr(cls, name).kind != ophyd.Kind.omitted:
assert regex.match(name) is not None
for name in getattr(cls, 'tab_whitelist', []):
assert regex.match(name) is not None
_STATUS_PRINT_IGNORES = {
'.AttenuatorCalculatorBase',
'.BadSlitPositionerBase',
'.DelayBase',
'.IPM_Det',
'.InOutPVStatePositioner',
'.KappaXYZStage',
'.PVPositionerComparator',
'.PVPositionerDone',
'.PVPositionerIsClose',
'.PseudoSingleInterface',
'.PulsePicker',
'.SlitsBase',
'.SyncAxesBase',
'.OffsetMotorBase',
}
def _should_check_status_prints(cls):
"""Filter out classes for checking ``status_info``."""
fully_qualified_name = f'{cls.__module__}.{cls.__name__}'
if any(name in fully_qualified_name for name in _STATUS_PRINT_IGNORES):
return False
# Otherwise, include any Devices that inherit from BaseInterface.
return BaseInterface in cls.mro()
@pytest.mark.parametrize(
'cls',
[pytest.param(cls, id=f'{cls.__module__}.{cls.__name__}')
for cls in conftest.find_all_device_classes()
if _should_check_status_prints(cls)
]
)
def test_smoke_status_prints(cls):
instance = conftest.best_effort_instantiation(cls)
status_info = instance.status_info()
print(instance.format_status_info(status_info))
def test_tab_helper_no_mixin():
class MyDevice:
...
helper = TabCompletionHelperClass(MyDevice)
with pytest.raises(AssertionError):
# Must mix in BaseInterface
helper.new_instance(MyDevice())
def test_tab_helper_class():
class MyDeviceBaseA(BaseInterface, ophyd.Device):
tab_whitelist = ['a']
a = 1
class MyDeviceBaseB:
tab_whitelist = ['b']
b = 2
class MyDevice(MyDeviceBaseA, MyDeviceBaseB):
tab_whitelist = ['c']
c = 3
foobar = 4
tab_component_names = True
cpt = ophyd.Component(ophyd.Signal)
assert MyDeviceBaseA._class_tab is not MyDevice._class_tab
assert {'a'}.issubset(MyDeviceBaseA._class_tab._includes)
assert {'a', 'b', 'c', 'cpt'}.issubset(MyDevice._class_tab._includes)
instance = MyDevice(name='instance')
tab = instance._tab
assert {'a', 'b', 'c', 'cpt'}.issubset(tab._includes)
for attr in ['a', 'b', 'c', 'cpt']:
assert attr in tab.get_filtered_dir_list()
assert 'foobar' not in tab.get_filtered_dir_list()
tab.add('foobar')
assert 'foobar' in tab.get_filtered_dir_list()
tab.remove('foobar')
assert 'foobar' not in tab.get_filtered_dir_list()
tab.add('foobar')
tab.reset()
assert 'foobar' not in tab.get_filtered_dir_list()
|
the-stack_0_17908 | # -*- coding: cp936 -*-
import numpy as np
import matplotlib.pyplot as plt
import random
def get_data(dim = 2,classes = 2,count = 1000,train_ratio = 0.8,scale = None,tightness = None,centroids = None,if_show = False):
'''
Generate data clusters randomly for classification tasks.
dim -> the dimension of the data vector
classes -> number of clusters
count -> total samples
train_ratio -> train data portion
    scale -> magnitude of the data, should be > 1
tightness -> how close the data is to its centroid
centroids -> array of centers of each cluster, shape should be (B,...), where B is the number of classes
'''
if scale is None:
scale = classes / 2
elif scale < 1:
scale = 1
if tightness is None:
tightness = 0.05 * scale / classes
if centroids is None:# generate centroids for each class
centroids = (np.random.rand(classes,dim) - 0.5) * 2 * scale
X = []
Y = []
for i in range(classes): #generate data in each class
        X.append(np.random.normal(0,tightness,(count // classes,dim)) + centroids[i])
        Y += [i] * (count // classes)
for i in range(count - len(Y)):#pad to required count if division left a remainder
c_idx = np.random.randint(classes)
X.append(np.random.normal(0,tightness,(1,dim))+ centroids[c_idx])
Y.append(c_idx)
X = np.concatenate(X,0)
Y = np.array(Y)
p = np.random.permutation(count)
X = X[p]
Y = Y[p]
train_count = int(count * train_ratio)
X_train = X[:train_count]
X_test = X[train_count:]
Y_train = Y[:train_count]
Y_test = Y[train_count:]
if if_show: # show only the first two dimensions, may use t-sne later
if dim < 2:
plt.subplot(121)
plt.scatter(X_train[:],[0] * len(X_train))
for i in range(min(classes * 10,int(count * train_ratio))):
plt.text(X_train[i][0],0,str(Y_train[i]))
plt.subplot(122)
plt.scatter(X_test[:],[0] * len(X_test))
for i in range(min(classes * 10,int(count * (1 - train_ratio)))):
plt.text(X_test[i][0],0,str(Y_test[i]))
else:
plt.subplot(121)
plt.xlim(-1.5 * scale,1.5 * scale)
plt.ylim(-1.5 * scale,1.5 * scale)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
plt.scatter(X_train[:][:,0],X_train[:][:,1])
for i in range(min(classes * 10,int(count * train_ratio))):
plt.text(X_train[i][0],X_train[i][1],str(Y_train[i]))
plt.subplot(122)
plt.xlim(-1.5 * scale,1.5 * scale)
plt.ylim(-1.5 * scale,1.5 * scale)
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
plt.scatter(X_test[:][:,0],X_test[:][:,1])
for i in range(min(classes * 10,int(count * (1 - train_ratio)))):
plt.text(X_test[i][0],X_test[i][1],str(Y_test[i]))
plt.show()
return X_train,Y_train,X_test,Y_test
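# Minimal usage sketch (this __main__ block is an illustrative addition, not part
# of the original module): generate two 2-D clusters and report the split sizes.
if __name__ == '__main__':
    X_train, Y_train, X_test, Y_test = get_data(dim=2, classes=2, count=200,
                                                train_ratio=0.8, if_show=False)
    print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)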
|
the-stack_0_17910 | # -*- coding: utf8 -*-
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from git_review import tests
class GitReviewTestCase(tests.BaseGitReviewTestCase):
"""Class for the git-review tests."""
def test_cloned_repo(self):
"""Test git-review on the just cloned repository."""
self._simple_change('test file modified', 'test commit message')
self.assertNotIn('Change-Id:', self._run_git('log', '-1'))
self.assertIn('remote: New Changes:', self._run_git_review())
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def _configure_gitreview_username(self):
self._run_git('config', '--add', 'gitreview.username', 'test_user')
def test_git_review_s(self):
"""Test git-review -s."""
self._run_git('remote', 'rm', 'gerrit')
self._configure_gitreview_username()
self._run_git_review('-s')
self._simple_change('test file modified', 'test commit message')
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def test_git_review_s_in_detached_head(self):
"""Test git-review -s in detached HEAD state."""
self._run_git('remote', 'rm', 'gerrit')
self._configure_gitreview_username()
master_sha1 = self._run_git('rev-parse', 'master')
self._run_git('checkout', master_sha1)
self._run_git_review('-s')
self._simple_change('test file modified', 'test commit message')
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def test_git_review_s_with_outdated_repo(self):
"""Test git-review -s with a outdated repo."""
self._simple_change('test file to outdate', 'test commit message 1')
self._run_git('push', 'origin', 'master')
self._run_git('reset', '--hard', 'HEAD^')
# Review setup with an outdated repo
self._run_git('remote', 'rm', 'gerrit')
self._configure_gitreview_username()
self._run_git_review('-s')
self._simple_change('test file modified', 'test commit message 2')
self.assertIn('Change-Id:', self._run_git('log', '-1'))
def test_git_review_d(self):
"""Test git-review -d."""
self._run_git_review('-s')
# create new review to be downloaded
self._simple_change('test file modified', 'test commit message')
self._run_git_review()
change_id = self._run_git('log', '-1').split()[-1]
shutil.rmtree(self.test_dir)
# download clean Git repository and fresh change from Gerrit to it
self._run_git('clone', self.project_uri)
self._run_git('remote', 'add', 'gerrit', self.project_uri)
self._run_git_review('-d', change_id)
self.assertIn('test commit message', self._run_git('log', '-1'))
# second download should also work correct
self._run_git_review('-d', change_id)
self.assertIn('test commit message', self._run_git('show', 'HEAD'))
self.assertNotIn('test commit message',
self._run_git('show', 'HEAD^1'))
def test_multiple_changes(self):
"""Test git-review asks about multiple changes.
Should register user's wish to send two change requests by interactive
'yes' message and by the -y option.
"""
self._run_git_review('-s')
# 'yes' message
self._simple_change('test file modified 1st time',
'test commit message 1')
self._simple_change('test file modified 2nd time',
'test commit message 2')
review_res = self._run_git_review(confirm=True)
self.assertIn("Type 'yes' to confirm", review_res)
self.assertIn("Processing changes: new: 2", review_res)
# abandon changes sent to the Gerrit
head = self._run_git('rev-parse', 'HEAD')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_gerrit_cli('review', '--abandon', head)
self._run_gerrit_cli('review', '--abandon', head_1)
# -y option
self._simple_change('test file modified 3rd time',
'test commit message 3')
self._simple_change('test file modified 4th time',
'test commit message 4')
review_res = self._run_git_review('-y')
self.assertIn("Processing changes: new: 2", review_res)
def test_need_rebase_no_upload(self):
"""Test change needing a rebase does not upload."""
self._run_git_review('-s')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some other message',
'create conflict with master')
exc = self.assertRaises(Exception, self._run_git_review)
self.assertIn("Errors running git rebase -p -i remotes/gerrit/master",
exc.args[0])
def test_upload_without_rebase(self):
"""Test change not needing a rebase can upload without rebasing."""
self._run_git_review('-s')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some new message',
'just another file (no conflict)',
self._dir('test', 'new_test_file.txt'))
review_res = self._run_git_review('-v')
self.assertIn("Running: git rebase -p -i remotes/gerrit/master",
review_res)
self.assertEqual(self._run_git('rev-parse', 'HEAD^1'), head_1)
def test_no_rebase_check(self):
"""Test -R causes a change to be uploaded without rebase checking."""
self._run_git_review('-s')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some new message', 'just another file',
self._dir('test', 'new_test_file.txt'))
review_res = self._run_git_review('-v', '-R')
self.assertNotIn('rebase', review_res)
self.assertEqual(self._run_git('rev-parse', 'HEAD^1'), head_1)
def test_rebase_anyway(self):
"""Test -F causes a change to be rebased regardless."""
self._run_git_review('-s')
head = self._run_git('rev-parse', 'HEAD')
head_1 = self._run_git('rev-parse', 'HEAD^1')
self._run_git('checkout', '-b', 'test_branch', head_1)
self._simple_change('some new message', 'just another file',
self._dir('test', 'new_test_file.txt'))
review_res = self._run_git_review('-v', '-F')
self.assertIn('rebase', review_res)
self.assertEqual(self._run_git('rev-parse', 'HEAD^1'), head)
def _assert_branch_would_be(self, branch):
output = self._run_git_review('-n')
# last non-empty line should be:
# git push gerrit HEAD:refs/publish/master
last_line = output.strip().split('\n')[-1]
branch_was = last_line.rsplit(' ', 1)[-1].split('/', 2)[-1]
self.assertEqual(branch, branch_was)
def test_detached_head(self):
"""Test on a detached state: we shouldn't have '(detached' as topic."""
self._run_git_review('-s')
curr_branch = self._run_git('rev-parse', '--abbrev-ref', 'HEAD')
# Note: git checkout --detach has been introduced in git 1.7.5 (2011)
self._run_git('checkout', curr_branch + '^0')
self._simple_change('some new message', 'just another file',
self._dir('test', 'new_test_file.txt'))
# switch to French, 'git branch' should return '(détaché du HEAD)'
lang_env = os.getenv('LANG', 'C')
os.environ.update(LANG='fr_FR.UTF-8')
try:
self._assert_branch_would_be(curr_branch)
finally:
os.environ.update(LANG=lang_env)
def test_bug_topic(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change for bug 123')
self._assert_branch_would_be('master/bug/123')
def test_bug_topic_newline(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change not for bug\n123')
self._assert_branch_would_be('master')
def test_bp_topic(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change for blueprint asdf')
self._assert_branch_would_be('master/bp/asdf')
def test_bp_topic_newline(self):
self._run_git_review('-s')
self._simple_change('a change', 'new change not for bluepring\nasdf')
self._assert_branch_would_be('master')
def test_git_review_l(self):
self._run_git_review('-s')
# Populate "project" repo
self._simple_change('project: test1', 'project: change1, merged')
self._simple_change('project: test2', 'project: change2, open')
self._simple_change('project: test3', 'project: change3, abandoned')
self._run_git_review('-y')
head = self._run_git('rev-parse', 'HEAD')
head_2 = self._run_git('rev-parse', 'HEAD^^')
self._run_gerrit_cli('review', head_2, '--code-review=+2', '--submit')
self._run_gerrit_cli('review', head, '--abandon')
# Populate "project2" repo
self._run_gerrit_cli('create-project', '--empty-commit', '--name',
'test/test_project2')
project2_uri = self.project_uri.replace('test/test_project',
'test/test_project2')
self._run_git('fetch', project2_uri, 'HEAD')
self._run_git('checkout', 'FETCH_HEAD')
self._simple_change('project2: test1', 'project2: change1, open')
self._run_git('push', project2_uri, 'HEAD:refs/for/master')
# Only project1 open changes
result = self._run_git_review('-l')
self.assertNotIn('project: change1, merged', result)
self.assertIn('project: change2, open', result)
self.assertNotIn('project: change3, abandoned', result)
self.assertNotIn('project2:', result)
class HttpGitReviewTestCase(tests.HttpMixin, GitReviewTestCase):
"""Class for the git-review tests over HTTP(S)."""
def _configure_gitreview_username(self):
# trick to set http password
self._run_git('config', '--add', 'gitreview.username',
'test_user:test_pass')
|
the-stack_0_17912 | """
Automate OpenStreetMap wiki editing.
"""
import re
from pathlib import Path
from typing import Optional
from map_machine.doc.collections import Collection
from map_machine.map_configuration import MapConfiguration
from map_machine.osm.osm_reader import Tags
from map_machine.pictogram.icon import Icon, ShapeExtractor
from map_machine.scheme import Scheme
from map_machine.workspace import Workspace
WORKSPACE: Workspace = Workspace(Path("temp"))
SCHEME: Scheme = Scheme.from_file(WORKSPACE.DEFAULT_SCHEME_PATH)
EXTRACTOR: ShapeExtractor = ShapeExtractor(
WORKSPACE.ICONS_PATH, WORKSPACE.ICONS_CONFIG_PATH
)
HEADER_PATTERN: re.Pattern = re.compile("==?=?.*==?=?")
HEADER_2_PATTERN: re.Pattern = re.compile("== .* ==")
HEADER_PATTERNS: list[re.Pattern] = [
re.compile("==\\s*Example.*=="),
re.compile("==\\s*See also\\s*=="),
]
RENDERING_HEADER_PATTERN: re.Pattern = re.compile("==\\s*Rendering.*==")
ROENTGEN_HEADER_PATTERN: re.Pattern = re.compile("===.*Röntgen.*===")
class WikiTable:
"""SVG table with icon combinations."""
def __init__(self, collection: Collection, page_name: str):
self.collection: Collection = collection
self.page_name: str = page_name
def generate_wiki_table(self) -> tuple[str, list[Icon]]:
"""
Generate Röntgen icon table for the OpenStreetMap wiki page.
"""
icons: list[Icon] = []
text: str = '{| class="wikitable"\n'
if self.collection.column_key is not None:
text += f"! {{{{Key|{self.collection.column_key}}}}}"
else:
text += "! Tag || Icon"
if self.collection.row_tags:
text += "\n"
for current_tags in self.collection.row_tags:
text += "|-\n"
text += "| "
if current_tags:
for key, value in current_tags.items():
if value == "*":
text += f"{{{{Key|{key}}}}}<br />"
else:
text += f"{{{{Tag|{key}|{value}}}}}<br />"
text = text[:-6]
text += "\n"
icon, _ = SCHEME.get_icon(
EXTRACTOR,
current_tags | self.collection.tags,
set(),
MapConfiguration(ignore_level_matching=True),
)
icons.append(icon.main_icon)
text += (
"| "
f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n"
)
text += "|}\n"
return text, icons
if not self.collection.column_values:
self.collection.column_values = [""]
else:
make_vertical: bool = False
for column_value in self.collection.column_values:
if column_value and len(column_value) > 2:
make_vertical = True
for column_value in self.collection.column_values:
text += " ||"
if column_value:
tag: str = (
f"{{{{TagValue|"
f"{self.collection.column_key}|{column_value}}}}}"
)
text += " " + (
f"{{{{vert header|{tag}}}}}" if make_vertical else tag
)
text += "\n"
for row_value in self.collection.row_values:
text += "|-\n"
if row_value:
text += f"| {{{{Tag|{self.collection.row_key}|{row_value}}}}}\n"
else:
text += "|\n"
for column_value in self.collection.column_values:
current_tags: Tags = dict(self.collection.tags) | {
self.collection.row_key: row_value
}
if column_value:
current_tags |= {self.collection.column_key: column_value}
icon, _ = SCHEME.get_icon(EXTRACTOR, current_tags, set())
if not icon:
print("Icon was not constructed.")
text += (
"| "
f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n"
)
icons.append(icon.main_icon)
text += "|}\n"
return text, icons
def generate_new_text(
old_text: str,
table: WikiTable,
) -> tuple[Optional[str], list[Icon]]:
"""
Generate Röntgen icon table for the OpenStreetMap wiki page.
:param old_text: previous wiki page text
:param table: wiki table generator
:return: new wiki page text
"""
wiki_text: str
icons = []
if table.collection.row_key or table.collection.row_tags:
wiki_text, icons = table.generate_wiki_table()
else:
processed = set()
icon, _ = SCHEME.get_icon(
EXTRACTOR, table.collection.tags, processed, MapConfiguration()
)
if not icon.main_icon.is_default():
wiki_text = (
f"[[Image:Röntgen {icon.main_icon.get_name()}.svg|32px]]\n"
)
icons.append(icon.main_icon)
elif icon.extra_icons:
wiki_text = (
f"Röntgen icon set has additional icon for the tag: "
f"[[Image:Röntgen {icon.extra_icons[0].get_name()}.svg|32px]]."
f"\n"
)
icons.append(icon.extra_icons[0])
else:
wiki_text = ""
lines: list[str] = old_text.split("\n")
# If rendering section already exists.
start: Optional[int] = None
end: int = -1
for index, line in enumerate(lines):
if HEADER_2_PATTERN.match(line):
if start is not None:
end = index
break
if RENDERING_HEADER_PATTERN.match(line):
start = index
if start is not None:
return (
"\n".join(lines[: start + 2])
+ "\n=== [[Röntgen]] icons in [[Map Machine]] ===\n"
+ f"\n{wiki_text}\n"
+ "\n".join(lines[end:])
), icons
# If Röntgen rendering section already exists.
start: Optional[int] = None
end: int = -1
for index, line in enumerate(lines):
if HEADER_PATTERN.match(line):
if start is not None:
end = index
break
if ROENTGEN_HEADER_PATTERN.match(line):
start = index
if start is not None:
return (
"\n".join(lines[: start + 2])
+ f"\n{wiki_text}\n"
+ "\n".join(lines[end:])
), icons
# Otherwise.
headers: list[Optional[int]] = [None, None]
for index, line in enumerate(lines):
for i, pattern in enumerate(HEADER_PATTERNS):
if pattern.match(line):
headers[i] = index
filtered = list(filter(lambda x: x is not None, headers))
header: int
if filtered:
header = filtered[0]
else:
lines += [""]
header = len(lines)
return (
"\n".join(lines[:header])
+ "\n== Rendering ==\n\n=== [[Röntgen]] icons in [[Map Machine]] "
"===\n\n" + wiki_text + "\n" + "\n".join(lines[header:])
), icons
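# Usage sketch (illustrative only): ``collection`` is assumed to be an existing
# ``Collection`` instance describing the tags of a wiki page, and the page name
# and ``old_page_text`` variable below are hypothetical.
#
#     table = WikiTable(collection, "Tag:amenity=bench")
#     new_text, icons = generate_new_text(old_page_text, table)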
|
the-stack_0_17913 | import re
from django.conf import settings
SENSITIVE_KEYS = ['password', 'token', 'access', 'refresh']
if hasattr(settings, 'DRF_API_LOGGER_EXCLUDE_KEYS'):
if type(settings.DRF_API_LOGGER_EXCLUDE_KEYS) in (list, tuple):
SENSITIVE_KEYS.extend(settings.DRF_API_LOGGER_EXCLUDE_KEYS)
def get_headers(request=None):
"""
Function: get_headers(self, request)
Description: To get all the headers from request
"""
regex = re.compile('^HTTP_')
return dict((regex.sub('', header), value) for (header, value)
in request.META.items() if header.startswith('HTTP_'))
def get_client_ip(request):
try:
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
except:
return ''
def is_api_logger_enabled():
drf_api_logger_database = False
if hasattr(settings, 'DRF_API_LOGGER_DATABASE'):
drf_api_logger_database = settings.DRF_API_LOGGER_DATABASE
drf_api_logger_signal = False
if hasattr(settings, 'DRF_API_LOGGER_SIGNAL'):
drf_api_logger_signal = settings.DRF_API_LOGGER_SIGNAL
return drf_api_logger_database or drf_api_logger_signal
def database_log_enabled():
drf_api_logger_database = False
if hasattr(settings, 'DRF_API_LOGGER_DATABASE'):
drf_api_logger_database = settings.DRF_API_LOGGER_DATABASE
return drf_api_logger_database
def mask_sensitive_data(data):
"""
Hides sensitive keys specified in sensitive_keys settings.
Loops recursively over nested dictionaries.
"""
if type(data) != dict:
return data
for key, value in data.items():
if key in SENSITIVE_KEYS:
data[key] = "***FILTERED***"
if type(value) == dict:
data[key] = mask_sensitive_data(data[key])
if type(value) == list:
data[key] = [mask_sensitive_data(item) for item in data[key]]
return data
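# Example (illustrative; assumes Django settings are configured so the
# module-level SENSITIVE_KEYS lookup above succeeds):
#
#     payload = {'username': 'alice', 'password': 'hunter2',
#                'auth': {'token': 'abc123', 'note': 'hello'}}
#     mask_sensitive_data(payload)
#     # -> {'username': 'alice', 'password': '***FILTERED***',
#     #     'auth': {'token': '***FILTERED***', 'note': 'hello'}}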
|
the-stack_0_17917 | #!/usr/bin/env python3
"""
Contains the functionality around DotNet Cli.
"""
from argparse import Action, ArgumentParser, ArgumentTypeError, ArgumentError
from collections import namedtuple
from glob import iglob
from json import loads
from logging import getLogger
from os import chmod, environ, listdir, makedirs, path, pathsep
from re import search
from shutil import rmtree
from stat import S_IRWXU
from subprocess import check_output
from sys import argv, platform
from typing import Tuple
from urllib.parse import urlparse
from urllib.request import urlopen, urlretrieve
from performance.common import get_repo_root_path
from performance.common import get_tools_directory
from performance.common import push_dir
from performance.common import RunCommand
from performance.common import validate_supported_runtime
from performance.logger import setup_loggers
from channel_map import ChannelMap
def info(verbose: bool) -> None:
"""
Executes `dotnet --info` in order to get the .NET Core information from the
dotnet executable.
"""
cmdline = ['dotnet', '--info']
RunCommand(cmdline, verbose=verbose).run()
def __log_script_header(message: str):
message_length = len(message)
getLogger().info('-' * message_length)
getLogger().info(message)
getLogger().info('-' * message_length)
CSharpProjFile = namedtuple('CSharpProjFile', [
'file_name',
'working_directory'
])
class FrameworkAction(Action):
'''
Used by the ArgumentParser to represent the information needed to parse the
supported .NET frameworks argument from the command line.
'''
def __call__(self, parser, namespace, values, option_string=None):
if values:
setattr(namespace, self.dest, list(set(values)))
@staticmethod
def get_target_framework_moniker(framework: str) -> str:
'''
        Translates a framework name to its target framework moniker (TFM).
        To run CoreRT benchmarks we need to run the host BDN process as the
        latest .NET Core; the host process will build and run the CoreRT
        benchmarks.
'''
return 'netcoreapp5.0' if framework == 'corert' else framework
@staticmethod
def get_target_framework_monikers(frameworks: list) -> list:
'''
Translates framework names to target framework monikers (TFM)
Required to run CoreRT benchmarks where the host process must be .NET
Core, not CoreRT.
'''
monikers = [
FrameworkAction.get_target_framework_moniker(framework)
for framework in frameworks
]
# ['netcoreapp5.0', 'corert'] should become ['netcoreapp5.0']
return list(set(monikers))
class VersionsAction(Action):
'''
    Argument parser helper class used to validate the dotnet-versions input.
'''
def __call__(self, parser, namespace, values, option_string=None):
if values:
for version in values:
if not search(r'^\d\.\d+\.\d+', version):
raise ArgumentTypeError(
'Version "{}" is in the wrong format'.format(version))
setattr(namespace, self.dest, values)
class CompilationAction(Action):
'''
Tiered: (Default)
NoTiering: Tiering is disabled, but R2R code is not disabled.
This includes R2R code, useful for comparison against Tiered and
FullyJittedNoTiering for changes to R2R code or tiering.
Default: Don't set any environment variables. Use what the compiler views
as the default.
FullyJittedNoTiering: Tiering and R2R are disabled.
This is JIT-only, useful for comparison against Tiered and NoTiering
for changes to R2R code or tiering.
MinOpt:
Uses minopt-JIT for methods that do not have pregenerated code, useful
for startup time comparisons in scenario benchmarks that include a
startup time measurement (probably not for microbenchmarks), probably
not useful for a PR.
For PRs it is recommended to kick off a Tiered run, and being able to
manually kick-off NoTiering and FullyJittedNoTiering modes when needed.
'''
# TODO: Would 'Default' make sense for .NET Framework / CoreRT / Mono?
# TODO: Should only be required for benchmark execution under certain tools
TIERED = 'Tiered'
NO_TIERING = 'NoTiering'
DEFAULT = 'Default'
FULLY_JITTED_NO_TIERING = 'FullyJittedNoTiering'
MIN_OPT = 'MinOpt'
def __call__(self, parser, namespace, values, option_string=None):
if values:
if values not in CompilationAction.modes():
raise ArgumentTypeError('Unknown mode: {}'.format(values))
setattr(namespace, self.dest, values)
@staticmethod
def __set_mode(mode: str) -> None:
# Remove potentially set environments.
COMPLUS_ENVIRONMENTS = [
'COMPlus_JITMinOpts',
'COMPlus_ReadyToRun',
'COMPlus_TieredCompilation',
'COMPlus_ZapDisable',
]
for complus_environment in COMPLUS_ENVIRONMENTS:
if complus_environment in environ:
environ.pop(complus_environment)
# Configure .NET Runtime
if mode == CompilationAction.TIERED:
environ['COMPlus_TieredCompilation'] = '1'
elif mode == CompilationAction.NO_TIERING:
environ['COMPlus_TieredCompilation'] = '0'
elif mode == CompilationAction.FULLY_JITTED_NO_TIERING:
environ['COMPlus_ReadyToRun'] = '0'
environ['COMPlus_TieredCompilation'] = '0'
environ['COMPlus_ZapDisable'] = '1'
elif mode == CompilationAction.MIN_OPT:
environ['COMPlus_JITMinOpts'] = '1'
environ['COMPlus_TieredCompilation'] = '0'
elif mode != CompilationAction.DEFAULT:
raise ArgumentTypeError('Unknown mode: {}'.format(mode))
@staticmethod
def validate(usr_mode: str) -> str:
'''Validate user input.'''
requested_mode = None
for mode in CompilationAction.modes():
if usr_mode.casefold() == mode.casefold():
requested_mode = mode
break
if not requested_mode:
raise ArgumentTypeError('Unknown mode: {}'.format(usr_mode))
CompilationAction.__set_mode(requested_mode)
return requested_mode
@staticmethod
def modes() -> list:
'''Available .NET Performance modes.'''
return [
CompilationAction.DEFAULT,
CompilationAction.TIERED,
CompilationAction.NO_TIERING,
CompilationAction.FULLY_JITTED_NO_TIERING,
CompilationAction.MIN_OPT
]
@staticmethod
def noenv() -> str:
'''Default .NET performance mode.'''
return CompilationAction.modes()[0] # No environment set
@staticmethod
def help_text() -> str:
'''Gets the help string describing the different compilation modes.'''
return '''Different compilation modes that can be set to change the
.NET compilation behavior. The default configurations have changed between
releases of .NET. These flags enable ensuring consistency when running
more than one runtime. The different modes are: {}: no
environment variables are set; {}: tiering is enabled.
{}: tiering is disabled, but includes R2R code, and it is useful for
comparison against Tiered; {}: This is JIT-only, useful for comparison
against Tiered and NoTier for changes to R2R code or tiering; {}: uses
minopt-JIT for methods that do not have pregenerated code, and useful
for startup time comparisons in scenario benchmarks that include a
startup time measurement (probably not for microbenchmarks), probably
not useful for a PR.'''.format(
CompilationAction.DEFAULT,
CompilationAction.TIERED,
CompilationAction.NO_TIERING,
CompilationAction.FULLY_JITTED_NO_TIERING,
CompilationAction.MIN_OPT
)
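    # Example (illustrative):
    #   >>> CompilationAction.validate('tiered')
    #   'Tiered'
    #   # ...and COMPlus_TieredCompilation=1 is now set in os.environ.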
class CSharpProject:
'''
This is a class wrapper around the `dotnet` command line interface.
Remark: It assumes dotnet is already in the PATH.
'''
def __init__(self, project: CSharpProjFile, bin_directory: str):
if not project.file_name:
raise TypeError('C# file name cannot be null.')
if not project.working_directory:
raise TypeError('C# working directory cannot be null.')
if not bin_directory:
raise TypeError('bin folder cannot be null.')
self.__csproj_file = path.abspath(project.file_name)
self.__working_directory = path.abspath(project.working_directory)
self.__bin_directory = bin_directory
if not path.isdir(self.__working_directory):
raise ValueError(
'Specified working directory: {}, does not exist.'.format(
self.__working_directory
)
)
if not path.isfile(self.__csproj_file):
raise ValueError(
'Specified project file: {}, does not exist.'.format(
self.__csproj_file
)
)
@property
def working_directory(self) -> str:
'''Gets the working directory for the dotnet process to be started.'''
return self.__working_directory
@property
def csproj_file(self) -> str:
'''Gets the project file to run the dotnet cli against.'''
return self.__csproj_file
@property
def project_name(self) -> str:
'''Gets the project name.'''
return path.splitext(path.basename(self.__csproj_file))[0]
@property
def bin_path(self) -> str:
'''Gets the directory in which the built binaries will be placed.'''
return self.__bin_directory
def restore(self,
packages_path: str,
verbose: bool,
runtime_identifier: str = None) -> None:
'''
Calls dotnet to restore the dependencies and tools of the specified
project.
Keyword arguments:
packages_path -- The directory to restore packages to.
'''
if not packages_path:
raise TypeError('Unspecified packages directory.')
cmdline = [
'dotnet', 'restore',
self.csproj_file,
'--packages', packages_path
]
if runtime_identifier:
cmdline += ['--runtime', runtime_identifier]
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
def build(self,
configuration: str,
verbose: bool,
packages_path: str,
target_framework_monikers: list = None,
output_to_bindir: bool = False,
runtime_identifier: str = None,
*args) -> None:
'''Calls dotnet to build the specified project.'''
if not target_framework_monikers: # Build all supported frameworks.
cmdline = [
'dotnet', 'build',
self.csproj_file,
'--configuration', configuration,
'--no-restore',
"/p:NuGetPackageRoot={}".format(packages_path),
]
if output_to_bindir:
cmdline = cmdline + ['--output', self.__bin_directory]
if runtime_identifier:
cmdline = cmdline + ['--runtime', runtime_identifier]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
else: # Only build specified frameworks
for target_framework_moniker in target_framework_monikers:
cmdline = [
'dotnet', 'build',
self.csproj_file,
'--configuration', configuration,
'--framework', target_framework_moniker,
'--no-restore',
"/p:NuGetPackageRoot={}".format(packages_path),
]
if output_to_bindir:
cmdline = cmdline + ['--output', self.__bin_directory]
if runtime_identifier:
cmdline = cmdline + ['--runtime', runtime_identifier]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
@staticmethod
def new(template: str,
output_dir: str,
bin_dir: str,
verbose: bool,
working_directory: str,
force: bool = False,
exename: str = None,
language: str = None
):
'''
Creates a new project with the specified template
'''
cmdline = [
'dotnet', 'new',
template,
'--output', output_dir,
'--no-restore'
]
if force:
cmdline += ['--force']
if exename:
cmdline += ['--name', exename]
if language:
cmdline += ['--language', language]
RunCommand(cmdline, verbose=verbose).run(
working_directory
)
# the file could be any project type. let's guess.
project_type = 'csproj'
if language == 'vb':
project_type = 'vbproj'
return CSharpProject(CSharpProjFile(path.join(output_dir, '%s.%s' % (exename or output_dir, project_type)),
working_directory),
bin_dir)
def publish(self,
configuration: str,
output_dir: str,
verbose: bool,
packages_path,
target_framework_moniker: str = None,
runtime_identifier: str = None,
*args
) -> None:
'''
Invokes publish on the specified project
'''
cmdline = [
'dotnet', 'publish',
self.csproj_file,
'--configuration', configuration,
'--output', output_dir,
"/p:NuGetPackageRoot={}".format(packages_path)
]
if runtime_identifier:
cmdline += ['--runtime', runtime_identifier]
if target_framework_moniker:
cmdline += ['--framework', target_framework_moniker]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory
)
@staticmethod
def __print_complus_environment() -> None:
getLogger().info('-' * 50)
getLogger().info('Dumping COMPlus environment:')
COMPLUS_PREFIX = 'COMPlus'
for env in environ:
if env[:len(COMPLUS_PREFIX)].lower() == COMPLUS_PREFIX.lower():
getLogger().info(' "%s=%s"', env, environ[env])
getLogger().info('-' * 50)
def run(self,
configuration: str,
target_framework_moniker: str,
verbose: bool,
*args) -> None:
'''
Calls dotnet to run a .NET project output.
'''
CSharpProject.__print_complus_environment()
cmdline = [
'dotnet', 'run',
'--project', self.csproj_file,
'--configuration', configuration,
'--framework', target_framework_moniker,
'--no-restore', '--no-build',
]
if args:
cmdline = cmdline + list(args)
RunCommand(cmdline, verbose=verbose).run(
self.working_directory)
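# Usage sketch for CSharpProject (paths below are hypothetical placeholders and
# must point at an existing working directory and .csproj file):
#
#   project = CSharpProject(
#       CSharpProjFile('benchmarks.csproj', working_directory='src/benchmarks'),
#       bin_directory='artifacts/bin')
#   project.restore(packages_path='artifacts/packages', verbose=True)
#   project.build(configuration='Release', verbose=True,
#                 packages_path='artifacts/packages',
#                 target_framework_monikers=['netcoreapp3.1'])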
def get_framework_version(framework: str) -> str:
groups = search(r"^netcoreapp(\d)\.(\d)$", framework)
if not groups:
raise ValueError("Unknown target framework: {}".format(framework))
FrameworkVersion = namedtuple('FrameworkVersion', ['major', 'minor'])
version = FrameworkVersion(int(groups.group(1)), int(groups.group(2)))
return version
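# Example (illustrative):
#   >>> get_framework_version('netcoreapp3.1')
#   FrameworkVersion(major=3, minor=1)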
def get_base_path(dotnet_path: str = None) -> str:
"""Gets the dotnet Host version from the `dotnet --info` command."""
if not dotnet_path:
dotnet_path = 'dotnet'
output = check_output([dotnet_path, '--info'])
for line in output.splitlines():
decoded_line = line.decode('utf-8')
# The .NET Command Line Tools `--info` had a different output in 2.0
# This line seems commons in all Cli, so we can use the base path to
# get information about the .NET SDK/Runtime
groups = search(r"^ +Base Path\: +(.+)$", decoded_line)
if groups:
break
if not groups:
raise RuntimeError(
'Did not find "Base Path:" entry on the `dotnet --info` command'
)
return groups.group(1)
def get_sdk_path(dotnet_path: str = None) -> str:
base_path = get_base_path(dotnet_path)
sdk_path = path.abspath(path.join(base_path, '..'))
return sdk_path
def get_dotnet_path() -> str:
base_path = get_base_path(None)
dotnet_path = path.abspath(path.join(base_path, '..', '..'))
return dotnet_path
def get_dotnet_version(
framework: str,
dotnet_path: str = None,
sdk_path: str = None) -> str:
version = get_framework_version(framework)
sdk_path = get_sdk_path(dotnet_path) if sdk_path is None else sdk_path
sdks = [
d for d in listdir(sdk_path) if path.isdir(path.join(sdk_path, d))
]
sdks.sort(reverse=True)
# Determine the SDK being used.
# Attempt 1: Try to use exact match.
sdk = next((f for f in sdks if f.startswith(
"{}.{}".format(version.major, version.minor))), None)
if not sdk:
# Attempt 2: Increase the minor version by 1 and retry.
sdk = next((f for f in sdks if f.startswith(
"{}.{}".format(version.major, version.minor + 1))), None)
if not sdk:
sdk = next((f for f in sdks if f.startswith(
"{}.{}".format('5', '0'))), None)
if not sdk:
raise RuntimeError(
"Unable to determine the .NET SDK used for {}".format(framework)
)
return sdk
def get_dotnet_sdk(
framework: str,
dotnet_path: str = None,
sdk: str = None) -> str:
"""Gets the dotnet Host commit sha from the `dotnet --info` command."""
sdk_path = get_sdk_path(dotnet_path)
sdk = get_dotnet_version(framework, dotnet_path,
sdk_path) if sdk is None else sdk
with open(path.join(sdk_path, sdk, '.version')) as sdk_version_file:
return sdk_version_file.readline().strip()
raise RuntimeError("Unable to retrieve information about the .NET SDK.")
def get_repository(repository: str) -> Tuple[str, str]:
url_path = urlparse(repository).path
tokens = url_path.split("/")
if len(tokens) != 3:
raise ValueError('Unable to determine owner and repo from url.')
owner = tokens[1]
repo = tokens[2]
return owner, repo
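# Example (illustrative):
#   >>> get_repository('https://github.com/dotnet/performance')
#   ('dotnet', 'performance')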
def get_commit_date(
framework: str,
commit_sha: str,
repository: str = None
) -> str:
'''
Gets the .NET Core committer date using the GitHub Web API from the
repository.
'''
if not framework:
raise ValueError('Target framework was not defined.')
if not commit_sha:
raise ValueError('.NET Commit sha was not defined.')
url = None
urlformat = 'https://api.github.com/repos/%s/%s/commits/%s'
if repository is None:
# The origin of the repo where the commit belongs to has changed
        # between releases. Here we attempt to naively guess the repo.
core_sdk_frameworks = ['netcoreapp3.0', 'netcoreapp3.1', 'netcoreapp5.0']
repo = 'core-sdk' if framework in core_sdk_frameworks else 'cli'
url = urlformat % ('dotnet', repo, commit_sha)
else:
owner, repo = get_repository(repository)
url = urlformat % (owner, repo, commit_sha)
build_timestamp = None
with urlopen(url) as response:
getLogger().info("Commit: %s", url)
item = loads(response.read().decode('utf-8'))
build_timestamp = item['commit']['committer']['date']
if not build_timestamp:
raise RuntimeError(
'Could not get timestamp for commit %s' % commit_sha)
return build_timestamp
def get_build_directory(
bin_directory: str,
project_name: str,
configuration: str,
        target_framework_moniker: str) -> str:
'''
    Gets the output directory where the built artifacts are placed, with
    respect to the specified bin_directory.
'''
with push_dir(bin_directory):
return path.join(
bin_directory,
__find_build_directory(
configuration=configuration,
project_name=project_name,
target_framework_moniker=target_framework_moniker,
)
)
def __find_build_directory(
configuration: str,
project_name: str,
target_framework_moniker: str) -> str:
'''
    Attempts to get the output directory where the built artifacts are
    placed, with respect to the current working directory.
'''
pattern = '**/{ProjectName}/**/{Configuration}/{TargetFramework}'.format(
ProjectName=project_name,
Configuration=configuration,
TargetFramework=target_framework_moniker
)
for path_name in iglob(pattern, recursive=True):
if path.isdir(path_name):
return path_name
raise ValueError(
'Unable to determine directory for the specified pattern.')
def __get_directory(architecture: str) -> str:
'''Gets the default directory where dotnet is to be installed.'''
return path.join(get_tools_directory(), 'dotnet', architecture)
def remove_dotnet(architecture: str) -> str:
'''
Removes the dotnet installed in the tools directory associated with the
specified architecture.
'''
rmtree(__get_directory(architecture))
def shutdown_server(verbose:bool) -> None:
'''
Shuts down the dotnet server
'''
cmdline = [
'dotnet', 'build-server', 'shutdown'
]
RunCommand(cmdline, verbose=verbose).run(
get_repo_root_path())
def install(
architecture: str,
channels: list,
versions: str,
verbose: bool,
install_dir: str = None) -> None:
'''
Downloads dotnet cli into the tools folder.
'''
__log_script_header("Downloading DotNet Cli")
if not install_dir:
install_dir = __get_directory(architecture)
if not path.exists(install_dir):
makedirs(install_dir)
getLogger().info("DotNet Install Path: '%s'", install_dir)
# Download appropriate dotnet install script
dotnetInstallScriptExtension = '.ps1' if platform == 'win32' else '.sh'
dotnetInstallScriptName = 'dotnet-install' + dotnetInstallScriptExtension
url = 'https://dot.net/v1/'
dotnetInstallScriptUrl = url + dotnetInstallScriptName
dotnetInstallScriptPath = path.join(install_dir, dotnetInstallScriptName)
getLogger().info('Downloading %s', dotnetInstallScriptUrl)
urlretrieve(dotnetInstallScriptUrl, dotnetInstallScriptPath)
if platform != 'win32':
chmod(dotnetInstallScriptPath, S_IRWXU)
dotnetInstallInterpreter = [
'powershell.exe',
'-NoProfile',
'-ExecutionPolicy', 'Bypass',
dotnetInstallScriptPath
] if platform == 'win32' else [dotnetInstallScriptPath]
# If Version is supplied, pull down the specified version
common_cmdline_args = dotnetInstallInterpreter + [
'-InstallDir', install_dir,
'-Architecture', architecture
]
# Install Runtime/SDKs
if versions:
for version in versions:
cmdline_args = common_cmdline_args + ['-Version', version]
RunCommand(cmdline_args, verbose=verbose).run(
get_repo_root_path()
)
# Only check channels if versions are not supplied.
# When we supply a version, but still pull down with -Channel, we will use
# whichever sdk is newer. So if we are trying to check an older version,
# or if there is a new version between when we start a run and when we actually
# run, we will be testing the "wrong" version, ie, not the version we specified.
if (not versions) and channels:
for channel in channels:
cmdline_args = common_cmdline_args + ['-Channel', channel]
RunCommand(cmdline_args, verbose=verbose).run(
get_repo_root_path()
)
# Set DotNet Cli environment variables.
environ['DOTNET_CLI_TELEMETRY_OPTOUT'] = '1'
environ['DOTNET_MULTILEVEL_LOOKUP'] = '0'
environ['UseSharedCompilation'] = 'false'
environ['DOTNET_ROOT'] = install_dir
# Add installed dotnet cli to PATH
environ["PATH"] = install_dir + pathsep + environ["PATH"]
# If we have copied dotnet from a different machine, then it may not be
# marked as executable. Fix this.
if platform != 'win32':
chmod(path.join(install_dir, 'dotnet'), S_IRWXU)
def __add_arguments(parser: ArgumentParser) -> ArgumentParser:
'''
Adds new arguments to the specified ArgumentParser object.
'''
if not isinstance(parser, ArgumentParser):
raise TypeError('Invalid parser.')
SUPPORTED_ARCHITECTURES = [
'x64', # Default architecture
'x86',
'arm',
'arm64',
]
parser.add_argument(
'--architecture',
dest='architecture',
required=False,
default=SUPPORTED_ARCHITECTURES[0],
choices=SUPPORTED_ARCHITECTURES,
help='Architecture of DotNet Cli binaries to be installed.'
)
parser.add_argument(
'--dotnet-versions',
dest="dotnet_versions",
required=False,
nargs='+',
default=[],
action=VersionsAction,
help='Version of the dotnet cli to install in the A.B.C format'
)
return parser
def add_arguments(parser: ArgumentParser) -> ArgumentParser:
'''
Adds new arguments to the specified ArgumentParser object.
'''
parser = __add_arguments(parser)
# .NET Compilation modes.
parser.add_argument(
'--dotnet-compilation-mode',
dest='dotnet_compilation_mode',
required=False,
action=CompilationAction,
choices=CompilationAction.modes(),
default=CompilationAction.noenv(),
type=CompilationAction.validate,
help='{}'.format(CompilationAction.help_text())
)
return parser
def __process_arguments(args: list):
parser = ArgumentParser(
description='DotNet Cli wrapper.',
allow_abbrev=False
)
subparsers = parser.add_subparsers(
title='Subcommands',
description='Supported DotNet Cli subcommands',
dest='install',
)
subparsers.required = True
install_parser = subparsers.add_parser(
'install',
allow_abbrev=False,
help='Installs dotnet cli',
)
install_parser.add_argument(
'--channels',
dest='channels',
required=False,
nargs='+',
default=['master'],
choices= ChannelMap.get_supported_channels(),
help='Download DotNet Cli from the Channel specified.'
)
install_parser = __add_arguments(install_parser)
# private install arguments.
install_parser.add_argument(
'--install-dir',
dest='install_dir',
required=False,
type=str,
help='''Path to where to install dotnet. Note that binaries will be '''
'''placed directly in a given directory.''',
)
install_parser.add_argument(
'-v', '--verbose',
required=False,
default=False,
action='store_true',
help='Turns on verbosity (default "False")',
)
return parser.parse_args(args)
def __main(args: list) -> int:
validate_supported_runtime()
args = __process_arguments(args)
setup_loggers(verbose=args.verbose)
install(
architecture=args.architecture,
channels=args.channels,
versions=args.dotnet_versions,
verbose=args.verbose,
install_dir=args.install_dir,
)
if __name__ == "__main__":
__main(argv[1:])
|
the-stack_0_17919 | from django.urls import path
from . import views
app_name = 'polls'
urlpatterns = [
path('',views.IndexView.as_view(), name="index"),
path('<int:pk>/', views.DetailView.as_view(), name="detail"),
path('<int:pk>/results/', views.ResultsView.as_view(), name="results"),
path('<int:question_id>/vote/', views.vote, name="vote"),
]
|
the-stack_0_17920 | import psycopg2
import psycopg2.extras
from Infrastructure import log
logger = log.get_logger("Postgres")
class Connector:
def __init__(self, config):
self.host = config['hostname']
self.database = config['database']
self.user = config['username']
self.password = config['password']
self.connection = None
def connect(self):
i = 1
while not self.connection:
try:
self.connection = psycopg2.connect(host=self.host,
database=self.database,
user=self.user,
password=self.password)
except Exception as e:
i += 1
logger.info("Error postgres connection " + str(e))
logger.info("Connect postgres " + str(i))
if i > 10:
break
def execute_with_results(self, query, params={}, as_dict=False):
query = query.format(**params)
self.connect()
if as_dict:
cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
else:
cursor = self.connection.cursor()
cursor.execute(query)
data = cursor.fetchall()
self.connection.commit()
cursor.close()
self.close()
if as_dict:
data = list(map(lambda r: dict(r), data))
return data
def execute_with_results_generic(self, query):
self.connect()
cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor.execute(query)
rowcount = cursor.rowcount
try:
data = list(cursor.fetchall())
except Exception as ex:
data = []
self.connection.commit()
cursor.close()
return [data, rowcount]
def execute_multiple_queries_select_dict_response(self, store_procedure, params={}):
procedure = open(store_procedure, 'r').read()
sql_command = procedure.format(**params)
sqllist = sql_command.split(";")[:-1]
selects = []
for sql_c in sqllist:
selected = self.execute_with_results_generic(sql_c)
selects.append(selected)
return selects
def close(self):
if self.connection:
self.connection.close()
self.connection = None
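# Usage sketch (illustrative; host name and credentials are placeholders):
#
#     config = {'hostname': 'localhost', 'database': 'mydb',
#               'username': 'user', 'password': 'secret'}
#     db = Connector(config)
#     rows = db.execute_with_results("SELECT * FROM items WHERE id = {id}",
#                                    params={'id': 1}, as_dict=True)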
|
the-stack_0_17922 | import pandas as pd
from bokeh.io import output_file, show
from bokeh.models import (BasicTicker, ColorBar, ColumnDataSource,
LinearColorMapper, PrintfTickFormatter,)
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
from bokeh.transform import transform
output_file("unemploymemt.html")
data.Year = data.Year.astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
# reshape to 1D array or rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
source = ColumnDataSource(df)
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
p = figure(plot_width=800, plot_height=300, title="US unemployment 1948—2016",
x_range=list(data.index), y_range=list(reversed(data.columns)),
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Year", y="Month", width=1, height=1, source=source,
line_color=None, fill_color=transform('rate', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "7px"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
show(p)
|
the-stack_0_17923 | from __future__ import absolute_import
import functools
import logging
import posixpath
import six
from threading import Lock
import rb
from django.utils.functional import SimpleLazyObject
from pkg_resources import resource_string
from redis.client import Script, StrictRedis
from redis.connection import ConnectionPool
from redis.exceptions import ConnectionError, BusyLoadingError
from rediscluster import StrictRedisCluster
from sentry import options
from sentry.exceptions import InvalidConfiguration
from sentry.utils import warnings
from sentry.utils.warnings import DeprecatedSettingWarning
from sentry.utils.versioning import Version, check_versions
from sentry.utils.compat import map
logger = logging.getLogger(__name__)
_pool_cache = {}
_pool_lock = Lock()
def _shared_pool(**opts):
if "host" in opts:
key = "%s:%s/%s" % (opts["host"], opts["port"], opts["db"])
else:
key = "%s/%s" % (opts["path"], opts["db"])
pool = _pool_cache.get(key)
if pool is not None:
return pool
with _pool_lock:
pool = _pool_cache.get(key)
if pool is not None:
return pool
pool = ConnectionPool(**opts)
_pool_cache[key] = pool
return pool
_make_rb_cluster = functools.partial(rb.Cluster, pool_cls=_shared_pool)
def make_rb_cluster(*args, **kwargs):
# This uses the standard library `warnings`, since this is provided for
# plugin compatibility but isn't actionable by the system administrator.
import warnings
warnings.warn(
"Direct Redis cluster construction is deprecated, please use named clusters. "
"Direct cluster construction will be removed in Sentry 8.5.",
DeprecationWarning,
)
return _make_rb_cluster(*args, **kwargs)
class _RBCluster(object):
def supports(self, config):
return not config.get("is_redis_cluster", False)
def factory(self, **config):
# rb expects a dict of { host, port } dicts where the key is the host
# ID. Coerce the configuration into the correct format if necessary.
hosts = config["hosts"]
hosts = {k: v for k, v in enumerate(hosts)} if isinstance(hosts, list) else hosts
config["hosts"] = hosts
return _make_rb_cluster(**config)
def __str__(self):
return "Redis Blaster Cluster"
class RetryingStrictRedisCluster(StrictRedisCluster):
"""
Execute a command with cluster reinitialization retry logic.
Should a cluster respond with a ConnectionError or BusyLoadingError the
cluster nodes list will be reinitialized and the command will be executed
again with the most up to date view of the world.
"""
def execute_command(self, *args, **kwargs):
try:
return super(self.__class__, self).execute_command(*args, **kwargs)
except (
ConnectionError,
BusyLoadingError,
KeyError, # see: https://github.com/Grokzen/redis-py-cluster/issues/287
):
self.connection_pool.nodes.reset()
return super(self.__class__, self).execute_command(*args, **kwargs)
class _RedisCluster(object):
def supports(self, config):
# _RedisCluster supports two configurations:
# * Explicitly configured with is_redis_cluster. This mode is for real redis-cluster.
# * No is_redis_cluster, but only 1 host. This represents a singular node Redis running
# in non-cluster mode.
return config.get("is_redis_cluster", False) or len(config.get("hosts")) == 1
def factory(self, **config):
# StrictRedisCluster expects a list of { host, port } dicts. Coerce the
# configuration into the correct format if necessary.
hosts = config.get("hosts")
# TODO(joshuarli): modernize dict_six fixer
hosts = list(hosts.values()) if isinstance(hosts, dict) else hosts
# Redis cluster does not wait to attempt to connect. We'd prefer to not
# make TCP connections on boot. Wrap the client in a lazy proxy object.
def cluster_factory():
if config.get("is_redis_cluster", False):
return RetryingStrictRedisCluster(
startup_nodes=hosts,
decode_responses=True,
skip_full_coverage_check=True,
max_connections=16,
max_connections_per_node=True,
)
else:
host = hosts[0].copy()
host["decode_responses"] = True
return StrictRedis(**host)
return SimpleLazyObject(cluster_factory)
def __str__(self):
return "Redis Cluster"
class ClusterManager(object):
def __init__(self, options_manager, cluster_type=_RBCluster):
self.__clusters = {}
self.__options_manager = options_manager
self.__cluster_type = cluster_type()
def get(self, key):
cluster = self.__clusters.get(key)
if cluster:
return cluster
# TODO: This would probably be safer with a lock, but I'm not sure
# that it's necessary.
configuration = self.__options_manager.get("redis.clusters").get(key)
if configuration is None:
raise KeyError(u"Invalid cluster name: {}".format(key))
if not self.__cluster_type.supports(configuration):
raise KeyError(u"Invalid cluster type, expected: {}".format(self.__cluster_type))
cluster = self.__clusters[key] = self.__cluster_type.factory(**configuration)
return cluster
# TODO(epurkhiser): When migration of all rb cluster to true redis clusters has
# completed, remove the rb ``clusters`` module variable and rename
# redis_clusters to clusters.
clusters = ClusterManager(options.default_manager)
redis_clusters = ClusterManager(options.default_manager, _RedisCluster)
def get_cluster_from_options(setting, options, cluster_manager=clusters):
cluster_option_name = "cluster"
default_cluster_name = "default"
cluster_constructor_option_names = frozenset(("hosts",))
options = options.copy()
cluster_options = {
key: options.pop(key)
for key in set(options.keys()).intersection(cluster_constructor_option_names)
}
if cluster_options:
if cluster_option_name in options:
raise InvalidConfiguration(
u"Cannot provide both named cluster ({!r}) and cluster configuration ({}) options.".format(
cluster_option_name, ", ".join(map(repr, cluster_constructor_option_names))
)
)
else:
warnings.warn(
DeprecatedSettingWarning(
u"{} parameter of {}".format(
", ".join(map(repr, cluster_constructor_option_names)), setting
),
u'{}["{}"]'.format(setting, cluster_option_name),
removed_in_version="8.5",
),
stacklevel=2,
)
cluster = rb.Cluster(pool_cls=_shared_pool, **cluster_options)
else:
cluster = cluster_manager.get(options.pop(cluster_option_name, default_cluster_name))
return cluster, options
def get_dynamic_cluster_from_options(setting, config):
cluster_name = config.get("cluster", "default")
cluster_opts = options.default_manager.get("redis.clusters").get(cluster_name)
if cluster_opts is not None and cluster_opts.get("is_redis_cluster"):
# RedisCluster
return True, redis_clusters.get(cluster_name), config
# RBCluster
return (False,) + get_cluster_from_options(setting, config)
def validate_dynamic_cluster(is_redis_cluster, cluster):
try:
if is_redis_cluster:
cluster.ping()
else:
with cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(six.text_type(e))
def check_cluster_versions(cluster, required, recommended=None, label=None):
try:
with cluster.all() as client:
results = client.info()
except Exception as e:
# Any connection issues should be caught here.
raise InvalidConfiguration(six.text_type(e))
versions = {}
for id, info in results.value.items():
host = cluster.hosts[id]
# NOTE: This assumes there is no routing magic going on here, and
# all requests to this host are being served by the same database.
key = u"{host}:{port}".format(host=host.host, port=host.port)
versions[key] = Version(map(int, info["redis_version"].split(".", 3)))
check_versions(
"Redis" if label is None else "Redis (%s)" % (label,), versions, required, recommended
)
def load_script(path):
script = Script(None, resource_string("sentry", posixpath.join("scripts", path)))
# This changes the argument order of the ``Script.__call__`` method to
# encourage using the script with a specific Redis client, rather
# than implicitly using the first client that the script was registered
# with. (This can prevent lots of bizarre behavior when dealing with
# clusters of Redis servers.)
def call_script(client, keys, args):
u"""
Executes {!r} as a Lua script on a Redis server.
Takes the client to execute the script on as the first argument,
followed by the values that will be provided as ``KEYS`` and ``ARGV``
to the script as two sequence arguments.
""".format(
path
)
return script(keys, args, client)
return call_script
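# --- Illustrative sketch (not part of the original module) ---
# Shows how a hypothetical Redis-backed component could resolve its cluster
# through get_cluster_from_options(). The setting name and the options dict
# below are assumptions for illustration only.
def _example_resolve_cluster(backend_options=None):
    backend_options = backend_options or {"cluster": "default", "timeout": 5}
    cluster, remaining = get_cluster_from_options("SENTRY_EXAMPLE_OPTIONS", backend_options)
    # `remaining` keeps whatever was not consumed by the cluster lookup
    # (here: {"timeout": 5}); the caller typically passes it on to its backend.
    return cluster, remaining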
|
the-stack_0_17925 | if __name__ == '__main__' and __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
import os
import argparse
from mnist_model import MnistModel
from common.TFLearn.optimizer import Optimizer
from utct.TFLearn.converter import Converter
def parse_args():
parser = argparse.ArgumentParser(
description='Export TFLearn model parameters to h5 file',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--checkpoint-dir',
dest='checkpoint_dir',
help='Path to checkpoint files',
required=True,
type=str)
parser.add_argument(
'--file',
dest='file_name',
help='File name of checkpoint file',
required=True,
type=str)
parser.add_argument(
'--output',
dest='dst_filepath',
help='Output file for TFLearn model parameters',
required=True,
type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
model = MnistModel()
optimizer = Optimizer()
Converter.export_to_h5(
model=model,
optimizer=optimizer,
checkpoint_path=os.path.join(args.checkpoint_dir, args.file_name),
dst_filepath=args.dst_filepath)
if __name__ == '__main__':
main()
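# Example invocation (illustrative only; the script name and paths are placeholders):
#   python export_mnist_model.py --checkpoint-dir ./checkpoints \
#       --file model.tfl.ckpt-1000 --output mnist_params.h5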
|
the-stack_0_17926 | from flask import Flask, request, render_template
import json
import numpy as np
import pandas as pd
import nltk
import networkx
from nltk.tokenize import sent_tokenize
from sklearn.metrics.pairwise import cosine_similarity
from nltk.corpus import stopwords
import jinja2
jinja_environment = jinja2.Environment(autoescape=True,loader=jinja2.FileSystemLoader('templates'))
nltk.download('punkt') # one time execution
nltk.download('stopwords')
app = Flask(__name__)
@app.route('/')
def static_page():
return render_template('index.html')
def script(): # single domain multiple documentation article
df = pd.read_csv(r'C:\Users\samanvayvajpayee\Downloads\tennis_articles_v4.csv', encoding='utf-8')
sentences = []
for s in df['article_text']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x]
# Extract word vectors
# GloVe- word embeddings are vector representation of words.
# using GloVe also for maintaining the order
word_embeddings = {}
    # Download glove.6B.100d.txt embeddings and replace the file path below accordingly
    f = open(r'Desktop\textrank\glove.6B.100d.txt', encoding='utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
word_embeddings[word] = coefs
f.close()
# remove punctuations, numbers and special characters
clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ")
# make alphabets lowercase
clean_sentences = [s.lower() for s in clean_sentences]
# function to remove stopwords
    def remove_stop_words(sen, lang='english'):
stop_words = stopwords.words(lang)
sentence_new = " ".join([i for i in sen if i not in stop_words])
return sentence_new
# remove stopwords
clean_sentences = [remove_stop_words(r.split()) for r in clean_sentences]
# create a word-vector each with size 100 of each sentence
sentence_vectors = []
for sen in clean_sentences:
if len(sen) != 0:
v = sum([word_embeddings.get(w, np.zeros((100,))) for w in sen.split()])/(len(sen.split())+0.001)
else:
v = np.zeros((100,))
sentence_vectors.append(v)
# similarity matrix
sim_mat = np.zeros([len(sentences), len(sentences)])
# cosine similarity to check similarity between sentences
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1, 100),
sentence_vectors[j].reshape(1, 100))[0, 0]
# making a graph by applying pageRank algo
nx_graph = networkx.from_numpy_array(sim_mat)
scores = networkx.pagerank(nx_graph)
ranked_scores = sorted(((scores[i], s) for i,s in enumerate(sentences)), reverse=True)
    # Extract the top 3 ranked sentences as the summary
    s = ""
    for i in range(3):
        s += ranked_scores[i][1]
    return s
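# --- Illustrative sketch (not part of the original app) ---
# Demonstrates the ranking step of script() in isolation on a tiny, hand-made
# similarity matrix; the sentences and similarity values below are made up.
def _textrank_demo():
    toy_sim = np.array([[0.0, 0.5, 0.1],
                        [0.5, 0.0, 0.3],
                        [0.1, 0.3, 0.0]])
    graph = networkx.from_numpy_array(toy_sim)
    scores = networkx.pagerank(graph)
    toy_sentences = ["First sentence.", "Second sentence.", "Third sentence."]
    ranked = sorted(((scores[i], s) for i, s in enumerate(toy_sentences)), reverse=True)
    return ranked[0][1]  # the highest-scoring sentence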
summ=""
@app.route("/script", methods=['GET','POST'])
def summarize():
#if request.method == 'GET':
# input_string = request.form['text']
#if request.method == 'POST':
# request.form['sum']
summ = script()
return render_template('index.html', summary=summ)
if __name__ == "__main__":
app.run()
|
the-stack_0_17927 | """Tests for the plots.py submodule."""
import asyncio
from datetime import datetime
from pathlib import Path
from lsw_slackbot import plots
async def test_plot_resource_use(aggregation_level=None, dpi=100):
"""Tests plots.plot_resource_use"""
await plots.plot_resource_use(Path("test_data"),
Path(f"test_plots/stack_{aggregation_level}.png"),
datetime(2020, 1, 1, 12, 53),
end_time=datetime(2020, 1, 2, 7, 4),
aggregation_level=aggregation_level, dpi=dpi)
async def test_plot_resource_use_all_aggregation_levels(
levels_to_try=(None, "minute", "hour", "day", "week", "month", "year"), dpi=100):
"""Runs test_plot_resource_use at every available aggregation level."""
for a_level in levels_to_try:
print(f"Plotting level {a_level}")
await test_plot_resource_use(a_level, dpi=dpi)
if __name__ == "__main__":
asyncio.run(test_plot_resource_use_all_aggregation_levels(dpi=300))
|
the-stack_0_17931 | ################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import os
import pandas
from functools import partial, wraps
from random import random
from .view_config import ViewConfig
from ._data_formatter import to_format, _parse_format_options
from ._constants import COLUMN_SEPARATOR_STRING
from ._utils import _str_to_pythontype
from ._callback_cache import _PerspectiveCallBackCache
from ._date_validator import _PerspectiveDateValidator
from .libbinding import (
make_view_unit,
make_view_zero,
make_view_one,
make_view_two,
to_arrow_unit,
to_arrow_zero,
to_arrow_one,
to_arrow_two,
get_row_delta_unit,
get_row_delta_zero,
get_row_delta_one,
get_row_delta_two,
)
class View(object):
"""A :class:`~perspective.View` object represents a specific transform
(pivot, filter, sort, etc) configuration on an underlying
:class:`~perspective.Table`. :class:`~perspective.View` objects
cannot be directly instantiated - they must be derived from an existing
:class:`~perspective.Table` via the :func:`~perspective.Table.view()`
method.
:class:`~perspective.View` instances receive all updates from the
:class:`~perspective.Table` from which they are derived, and can be
serialized (via ``to_*`` methods) or trigger a callback when it is updated.
:class:`~perspective.View` objects will remain in memory and actively
process updates until :obj:`~perspective.View.delete()` method is called.
"""
def __init__(self, Table, **kwargs):
self._name = "py_" + str(random())
self._table = Table
self._config = ViewConfig(**kwargs)
self._sides = self.sides()
date_validator = _PerspectiveDateValidator()
self._is_unit_context = (
self._table._index == ""
and self._sides == 0
and len(self._config.get_row_pivots()) == 0
and len(self._config.get_column_pivots()) == 0
and len(self._config.get_filter()) == 0
and len(self._config.get_sort()) == 0
and len(self._config.get_computed_columns()) == 0
)
if self._is_unit_context:
self._view = make_view_unit(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
elif self._sides == 0:
self._view = make_view_zero(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
elif self._sides == 1:
self._view = make_view_one(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
else:
self._view = make_view_two(
self._table._table,
self._name,
COLUMN_SEPARATOR_STRING,
self._config,
date_validator,
)
self._column_only = self._view.is_column_only()
self._update_callbacks = self._table._update_callbacks
self._delete_callbacks = _PerspectiveCallBackCache()
self._client_id = None
def get_config(self):
"""Returns a copy of the immutable configuration ``kwargs`` from which
this :class:`~perspective.View` was instantiated.
Returns:
:obj:`dict`: ``kwargs`` supplied to the
:func:`perspective.Table.view()` method.
"""
return self._config.get_config()
def sides(self):
"""An integer representing the # of hierarchial axis on this
:class:`~perspective.View`.
0 - Neither ``row_pivots`` nor ``column_pivots`` properties are set.
1 - ``row_pivots`` is set.
2 - ``column_pivots`` is set (and also maybe ``row_pivots``).
Returns:
:obj:`int`: 0 <= N <= 2
"""
if (
len(self._config.get_row_pivots()) > 0
or len(self._config.get_column_pivots()) > 0
):
if len(self._config.get_column_pivots()) > 0:
return 2
else:
return 1
else:
return 0
def num_rows(self):
"""The number of aggregated rows in the :class:`~perspective.View`.
This count includes the total aggregate rows for all ``row_pivots``
depth levels, and can also be affected by any applied ``filter``.
Returns:
:obj:`int`: Number of rows.
"""
return self._view.num_rows()
def num_columns(self):
"""The number of aggregated columns in the :class:`~perspective.View`.
This is affected by the ``column_pivots`` that are applied to the
:class:`~perspective.View`.
Returns:
:obj:`int`: Number of columns.
"""
return self._view.num_columns()
def get_row_expanded(self, idx):
"""Returns whether row at `idx` is expanded or collapsed.
Returns:
:obj:`bool`: Is this row expanded?
"""
return self._view.get_row_expanded(idx)
def expand(self, idx):
"""Expands the row at 'idx', i.e. displaying its leaf rows.
Args:
idx (:obj:`int`): Row index to expand.
"""
return self._view.expand(idx, len(self._config.get_row_pivots()))
def collapse(self, idx):
"""Collapses the row at 'idx', i.e. hiding its leaf rows.
Args:
idx (:obj:`int`): Row index to collapse.
"""
return self._view.collapse(idx)
def set_depth(self, depth):
"""Sets the expansion depth of the pivot tree.
Args:
depth (:obj:`int`): Depth to collapse all nodes to, which
may be no greater then the length of the ``row_pivots``
property.
"""
return self._view.set_depth(depth, len(self._config.get_row_pivots()))
def column_paths(self):
"""Returns the names of the columns as they show in the
        :class:`~perspective.View`, i.e. the hierarchical columns when
``column_pivots`` is applied.
Returns:
:obj:`list` of :obj`str`: Aggregated column names.
"""
paths = self._view.column_paths()
string_paths = []
for path in paths:
string_paths.append(
COLUMN_SEPARATOR_STRING.join([p.to_string(False) for p in path])
)
return string_paths
def schema(self, as_string=False):
"""The schema of this :class:`~perspective.View`, which is a key-value
map that contains the column names and their Python data types.
        If the columns are aggregated, their aggregated types will be
        returned instead.
Keyword Args:
as_string (:obj:`bool`): returns data types as string
representations, if ``True``.
Returns:
:obj:`dict`: A map of :obj:`str` column name to :obj:`str` or
:obj:`type`, depending on the value of ``as_string`` kwarg.
"""
if as_string:
return {item[0]: item[1] for item in self._view.schema().items()}
return {
item[0]: _str_to_pythontype(item[1]) for item in self._view.schema().items()
}
def computed_schema(self, as_string=False):
if as_string:
return {item[0]: item[1] for item in self._view.computed_schema().items()}
return {
item[0]: _str_to_pythontype(item[1])
for item in self._view.computed_schema().items()
}
def on_update(self, callback, mode=None):
"""Add a callback to be fired when :func:`perspective.Table.update()` is
called on the parent :class:`~perspective.Table`.
Multiple callbacks can be set through calling ``on_update`` multiple
times, and will be called in the order they are set. Callback must be a
callable function that takes exactly 1 or 2 parameters, depending on
whether `on_update` is called with `mode="row"`. The first parameter is
always `port_id`, an :obj:`int` that indicates which input port the
update comes from. A `RuntimeError` will be thrown if the callback
has mis-configured parameters.
Args:
callback (:obj:`callable`): a callable function reference that will
be called when :func:`perspective.Table.update()` is called.
mode (:obj:`str`): if set to "row", the callback will be passed
an Arrow-serialized dataset of the rows that were updated.
Defaults to "none".
Examples:
>>> def updater(port_id):
... print("Update fired on port", port_id)
>>> def updater_with_delta(port_id, delta):
... print("Update on port", port_id, "delta len:", len(delta)))
>>> view.on_update(updater)
>>> view.on_update(updater, mode="row")
>>> table.update({"a": [1]})'
>>> Update fired on port 0
>>> Update on port 0 delta len: 64
"""
self._table._state_manager.call_process(self._table._table.get_id())
mode = mode or "none"
if not callable(callback):
raise ValueError("Invalid callback - must be a callable function")
if mode not in ["none", "row"]:
raise ValueError(
'Invalid update mode {} - valid on_update modes are "none" or "row"'.format(
mode
)
)
if mode == "row":
if not self._view._get_deltas_enabled():
self._view._set_deltas_enabled(True)
wrapped_callback = partial(
self._wrapped_on_update_callback, mode=mode, callback=callback
)
self._update_callbacks.add_callback(
{
"name": self._name,
"orig_callback": callback,
"callback": wrapped_callback,
}
)
def remove_update(self, callback):
"""Given a callback function, remove it from the list of callbacks.
Args:
callback (:obj:`func`): a function reference that will be removed.
Examples:
>>> table = perspective.Table(data)
>>> view = table.view()
>>> view2 = table.view()
>>> def callback():
... print("called!")
>>> view.on_update(callback)
>>> view2.on_update(callback)
>>> table.update(new_data)
called!
>>> view2.remove_update(callback)
>>> table.update(new_data) # callback removed and will not fire
"""
self._table._state_manager.call_process(self._table._table.get_id())
if not callable(callback):
return ValueError("remove_update callback should be a callable function!")
self._update_callbacks.remove_callbacks(
lambda cb: cb["orig_callback"] == callback
)
def on_delete(self, callback):
"""Set a callback to be run when the :func:`perspective.View.delete()`
method is called on this :class:`~perspective.View`.
Args:
callback (:obj:`callable`): A callback to run after
:func:`perspective.View.delete()` method has been called.
Examples:
>>> def deleter():
>>> print("Delete called!")
>>> view.on_delete(deleter)
>>> view.delete()
>>> Delete called!
"""
if not callable(callback):
return ValueError("on_delete callback must be a callable function!")
self._delete_callbacks.add_callback(callback)
def delete(self):
"""Delete the :class:`~perspective.View` and clean up all associated
callbacks.
This method must be called to clean up callbacks used by the
:class:`~perspective.View`, as well as allow for deletion of the
underlying :class:`~perspective.Table`.
Examples:
>>> table = perspective.Table(data)
>>> view = table.view()
>>> view.delete()
"""
self._table._state_manager.remove_process(self._table._table.get_id())
self._table._views.pop(self._table._views.index(self._name))
# remove the callbacks associated with this view
self._update_callbacks.remove_callbacks(lambda cb: cb["name"] == self._name)
[cb() for cb in self._delete_callbacks]
def remove_delete(self, callback):
"""Remove the delete callback associated with this
:class:`~perspective.View`.
Args:
callback (:obj:`callable`): A reference to a callable function that
will be removed from delete callbacks.
Examples:
>>> table = perspective.Table(data)
>>> view = table.view()
>>> view2 = table.view()
>>> def callback():
... print("called!")
>>> view.on_delete(callback)
>>> view2.on_delete(callback)
>>> view.delete()
called!
>>> view2.remove_delete(callback)
>>> view2.delete() # callback removed and will not fire
"""
if not callable(callback):
return ValueError("remove_delete callback should be a callable function!")
self._delete_callbacks.remove_callbacks(lambda cb: cb == callback)
def to_arrow(self, **kwargs):
options = _parse_format_options(self, kwargs)
if self._is_unit_context:
return to_arrow_unit(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
elif self._sides == 0:
return to_arrow_zero(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
elif self._sides == 1:
return to_arrow_one(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
else:
return to_arrow_two(
self._view,
options["start_row"],
options["end_row"],
options["start_col"],
options["end_col"],
)
def to_records(self, **kwargs):
"""Serialize the :class:`~perspective.View`'s dataset into a :obj:`list`
of :obj:`dict` containing each row.
By default, the entire dataset is returned, though this can be windowed
via ``kwargs``. When ``row_pivots`` are applied, a ``__ROW_PATH__``
column name will be generated in addition to the applied ``columns``.
When ``column_pivots`` are applied, column names will be qualified
with their column group name.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:obj:`list` of :obj:`dict`: A list of :obj:`dict`, where each dict
represents a row of the current state of the
:class:`~perspective.View`.
"""
return to_format(kwargs, self, "records")
def to_dict(self, **options):
"""Serialize the :class:`~perspective.View`'s dataset into a :obj:`dict`
of :obj:`str` keys and :obj:`list` values. Each key is a column name,
and the associated value is the column's data packed into a :obj:`list`.
If the :class:`~perspective.View` is aggregated, the aggregated dataset
will be returned.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:obj:`dict`: A dictionary with string keys and list values, where
key = column name and value = column values.
"""
return to_format(options, self, "dict")
def to_numpy(self, **options):
"""Serialize the view's dataset into a :obj:`dict` of :obj:`str` keys
and :class:`numpy.array` values. Each key is a column name, and the
associated value is the column's data packed into a numpy array.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:obj:`dict` of :class:`numpy.array`: A dictionary with string keys
and numpy array values, where key = column name and
value = column values.
"""
return to_format(options, self, "numpy")
def to_df(self, **options):
"""Serialize the view's dataset into a pandas dataframe.
If the view is aggregated, the aggregated dataset will be returned.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to ``False``).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to ``False``).
Returns:
:class:`pandas.DataFrame`: A DataFrame serialization of the current
state of this :class:`~perspective.View`.
"""
cols = self.to_numpy(**options)
return pandas.DataFrame(cols)
def to_csv(self, **options):
"""Serialize the :class:`~perspective.View`'s dataset into a CSV string.
Keyword Args:
start_row (:obj:`int`): (Defaults to 0).
end_row (:obj:`int`): (Defaults to
:func:`perspective.View.num_rows()`).
start_col (:obj:`int`): (Defaults to 0).
end_col (:obj:`int`): (Defaults to
:func:`perspective.View.num_columns()`).
id (:obj:`bool`): Whether to return a logical row ID for each
row (Defaults to ``False``).
index (:obj:`bool`): Whether to return an implicit pkey for each
row (Defaults to False).
leaves_only (:obj:`bool`): Whether to return only the data at the
end of the tree (Defaults to False).
date_format (:obj:`str`): How ``datetime`` objects should be
formatted in the CSV.
Returns:
:obj:`str`: A CSV-formatted string containing the serialized data.
"""
date_format = None
# Handle to_csv calls from `<perspective-viewer>`, which uses the
# JavaScript Intl.DateTimeFormat API that takes a locale instead of a
# string format.
# TODO This should move to portable code.
if options.pop("formatted", False):
date_format = "%Y/%m/%d %H:%M:%S"
return self.to_df(**options).to_csv(
date_format=date_format,
line_terminator="\r\n" if os.name == "nt" else "\n",
)
@wraps(to_records)
def to_json(self, **options):
return self.to_records(**options)
@wraps(to_dict)
def to_columns(self, **options):
return self.to_dict(**options)
def _get_row_delta(self):
if self._is_unit_context:
return get_row_delta_unit(self._view)
elif self._sides == 0:
return get_row_delta_zero(self._view)
elif self._sides == 1:
return get_row_delta_one(self._view)
else:
return get_row_delta_two(self._view)
def _num_hidden_cols(self):
"""Returns the number of columns that are sorted but not shown."""
hidden = 0
columns = self._config.get_columns()
for sort in self._config.get_sort():
if sort[0] not in columns:
hidden += 1
return hidden
def _wrapped_on_update_callback(self, **kwargs):
"""Provide the user-defined callback function with additional metadata
from the view.
"""
mode = kwargs["mode"]
port_id = kwargs["port_id"]
cache = kwargs["cache"]
callback = kwargs["callback"]
if cache.get(port_id) is None:
cache[port_id] = {}
if mode == "row":
if cache[port_id].get("row_delta") is None:
cache["row_delta"] = self._get_row_delta()
callback(port_id, cache["row_delta"])
else:
callback(port_id)
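# --- Illustrative usage sketch (not part of the original module) ---
# Typical lifecycle of a View derived from an existing Table; the pivot and
# column names below are assumptions for illustration only.
def _example_view_roundtrip(table):
    view = table.view(row_pivots=["category"], columns=["value"])
    records = view.to_records()
    view.delete()
    return records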
|
the-stack_0_17934 | import pandas as pd
import numpy as np
import pickle
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.stem.porter import *
import string
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as VS
from textstat.textstat import *
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import seaborn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.pipeline import Pipeline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# df = pd.read_csv("../data/labeled_data.csv")
df = pd.read_csv("../data/MMHS10K_data.csv")
df_test = pd.read_csv("../data/MMHS10K_test_data.csv")
out_file = open('../../../datasets/HateSPic/HateSPic/davison/MMHS10K_v2mm_testScores.txt','w')
df.describe()
df.columns
df['class'].hist()
tweets=df.tweet
tweets_test=df_test.tweet
# Feature generation
stopwords = nltk.corpus.stopwords.words("english")
other_exclusions = ["#ff", "ff", "rt"]
stopwords.extend(other_exclusions)
stemmer = PorterStemmer()
def preprocess(text_string):
"""
Accepts a text string and replaces:
1) urls with URLHERE
2) lots of whitespace with one instance
3) mentions with MENTIONHERE
This allows us to get standardized counts of urls and mentions
Without caring about specific people mentioned
"""
space_pattern = '\s+'
giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
mention_regex = '@[\w\-]+'
parsed_text = re.sub(space_pattern, ' ', text_string)
parsed_text = re.sub(giant_url_regex, '', parsed_text)
parsed_text = re.sub(mention_regex, '', parsed_text)
return parsed_text
def tokenize(tweet):
"""Removes punctuation & excess whitespace, sets to lowercase,
and stems tweets. Returns a list of stemmed tokens."""
tweet = " ".join(re.split("[^a-zA-Z]*", tweet.lower())).strip()
tokens = [stemmer.stem(t) for t in tweet.split()]
return tokens
def basic_tokenize(tweet):
"""Same as tokenize but without the stemming"""
tweet = " ".join(re.split("[^a-zA-Z.,!?]*", tweet.lower())).strip()
return tweet.split()
vectorizer = TfidfVectorizer(
tokenizer=tokenize,
preprocessor=preprocess,
ngram_range=(1, 3),
stop_words=stopwords,
use_idf=True,
smooth_idf=False,
norm=None,
decode_error='replace',
max_features=10000,
min_df=5,
max_df=0.75
)
#Construct tfidf matrix and get relevant scores
tfidf = vectorizer.fit_transform(tweets).toarray()
tfidf_test = vectorizer.transform(tweets_test).toarray()
vocab = {v:i for i, v in enumerate(vectorizer.get_feature_names())}
idf_vals = vectorizer.idf_
idf_dict = {i:idf_vals[i] for i in vocab.values()} #keys are indices; values are IDF scores
#Get POS tags for tweets and save as a string
tweet_tags = []
for t in tweets:
tokens = basic_tokenize(preprocess(t))
tags = nltk.pos_tag(tokens)
tag_list = [x[1] for x in tags]
tag_str = " ".join(tag_list)
tweet_tags.append(tag_str)
#Get POS tags for tweets and save as a string
tweet_tags_test = []
for t in tweets_test:
tokens = basic_tokenize(preprocess(t))
tags = nltk.pos_tag(tokens)
tag_list = [x[1] for x in tags]
tag_str = " ".join(tag_list)
tweet_tags_test.append(tag_str)
#We can use the TFIDF vectorizer to get a token matrix for the POS tags
pos_vectorizer = TfidfVectorizer(
tokenizer=None,
lowercase=False,
preprocessor=None,
ngram_range=(1, 3),
stop_words=None,
use_idf=False,
smooth_idf=False,
norm=None,
decode_error='replace',
max_features=5000,
min_df=5,
max_df=0.75,
)
#Construct POS TF matrix and get vocab dict
pos = pos_vectorizer.fit_transform(pd.Series(tweet_tags)).toarray()
pos_test = pos_vectorizer.transform(pd.Series(tweet_tags_test)).toarray()
pos_vocab = {v:i for i, v in enumerate(pos_vectorizer.get_feature_names())}
# Now get other features
sentiment_analyzer = VS()
def count_twitter_objs(text_string):
"""
Accepts a text string and replaces:
1) urls with URLHERE
2) lots of whitespace with one instance
3) mentions with MENTIONHERE
4) hashtags with HASHTAGHERE
This allows us to get standardized counts of urls and mentions
Without caring about specific people mentioned.
Returns counts of urls, mentions, and hashtags.
"""
space_pattern = '\s+'
giant_url_regex = ('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
mention_regex = '@[\w\-]+'
hashtag_regex = '#[\w\-]+'
parsed_text = re.sub(space_pattern, ' ', text_string)
parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)
parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
parsed_text = re.sub(hashtag_regex, 'HASHTAGHERE', parsed_text)
return (parsed_text.count('URLHERE'), parsed_text.count('MENTIONHERE'), parsed_text.count('HASHTAGHERE'))
def other_features(tweet):
"""This function takes a string and returns a list of features.
These include Sentiment scores, Text and Readability scores,
as well as Twitter specific features"""
sentiment = sentiment_analyzer.polarity_scores(tweet)
words = preprocess(tweet) # Get text only
syllables = textstat.syllable_count(words)
num_chars = sum(len(w) for w in words)
num_chars_total = len(tweet)
num_terms = len(tweet.split())
num_words = len(words.split())
avg_syl = round(float((syllables + 0.001)) / float(num_words + 0.001), 4)
num_unique_terms = len(set(words.split()))
###Modified FK grade, where avg words per sentence is just num words/1
FKRA = round(float(0.39 * float(num_words) / 1.0) + float(11.8 * avg_syl) - 15.59, 1)
##Modified FRE score, where sentence fixed to 1
FRE = round(206.835 - 1.015 * (float(num_words) / 1.0) - (84.6 * float(avg_syl)), 2)
twitter_objs = count_twitter_objs(tweet)
retweet = 0
if "rt" in words:
retweet = 1
features = [FKRA, FRE, syllables, avg_syl, num_chars, num_chars_total, num_terms, num_words,
num_unique_terms, sentiment['neg'], sentiment['pos'], sentiment['neu'], sentiment['compound'],
twitter_objs[2], twitter_objs[1],
twitter_objs[0], retweet]
# features = pandas.DataFrame(features)
return features
def get_feature_array(tweets):
feats = []
for t in tweets:
feats.append(other_features(t))
return np.array(feats)
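# --- Illustrative sketch (not part of the original script) ---
# Shows the hand-crafted feature vector for a single made-up tweet; the text
# below is an arbitrary example.
def _example_single_tweet_features():
    toy_tweet = "RT @someone check this out http://example.com #example"
    feats = other_features(toy_tweet)
    # 17 values: readability scores, length counts, VADER sentiment scores,
    # URL/mention/hashtag counts and a retweet flag (see other_features_names below).
    return feats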
other_features_names = ["FKRA", "FRE","num_syllables", "avg_syl_per_word", "num_chars", "num_chars_total", \
"num_terms", "num_words", "num_unique_words", "vader neg","vader pos","vader neu", \
"vader compound", "num_hashtags", "num_mentions", "num_urls", "is_retweet"]
feats = get_feature_array(tweets)
feats_test = get_feature_array(tweets_test)
#Now join them all up
M = np.concatenate([tfidf,pos,feats],axis=1)
M_test = np.concatenate([tfidf_test,pos_test,feats_test],axis=1)
M.shape
M_test.shape
#Finally get a list of variable names
variables = ['']*len(vocab)
for k,v in vocab.items():
variables[v] = k
pos_variables = ['']*len(pos_vocab)
for k,v in pos_vocab.items():
pos_variables[v] = k
feature_names = variables+pos_variables+other_features_names
# Running the model
# The best model was selected using a GridSearch with 5-fold CV.
X = pd.DataFrame(M)
y = df['class'].astype(int)
X_test = pd.DataFrame(M_test)
y_test = df_test['class'].astype(int)
X_train, X_test_notused, y_train, y_test_notused = train_test_split(X, y, random_state=42, test_size=0)
pipe = Pipeline(
[('select', SelectFromModel(LogisticRegression(class_weight='balanced',
penalty="l1", C=0.01))),
('model', LogisticRegression(class_weight='balanced',penalty='l2'))])
param_grid = [{}] # Optionally add parameters here
grid_search = GridSearchCV(pipe,
param_grid,
cv=StratifiedKFold(n_splits=5,
random_state=42).split(X_train, y_train),
verbose=2)
model = grid_search.fit(X_train, y_train)
y_preds = model.predict(X_test)
y_probs = model.predict_proba(X_test)
for c,result in enumerate(y_preds):
tweet_id = df_test['tweet_id'][c]
hate_prob = y_probs[c,0]
not_hate_prob = y_probs[c, 1]
out_file.write(str(tweet_id)+','+str(result)+','+str(hate_prob)+','+str(not_hate_prob)+'\n')
# Evaluating the results
report = classification_report( y_test, y_preds )
print(report) |
the-stack_0_17935 | #!/usr/bin/python
# vim:fileencoding=utf-8
# (c) 2017 Michał Górny <[email protected]>
# Released under the terms of the 2-clause BSD license.
import argparse, os.path
from abc import abstractmethod
from . import PV, get_package_manager
from .exceptions import (AmbiguousPackageSetError, EmptyPackageSetError,
InvalidAtomStringError)
from .submodules import _supported_pms, get_pm
from .util import ABCObject
def _reponame(val):
"""
Check the value for correctness as repository name. In fact, it only ensures
it isn't a path so that it won't confuse pm.repositories[val].
@param val: the config option value
@type val: string
    @return: the validated repository name
    @rtype: string
"""
if os.path.isabs(val):
raise ValueError('Invalid repository name: %s' % val)
return val
def AtomFormatDict(a):
return {
'key': a.key,
'path': a.path,
'repository': a.repository,
'slot': a.slot,
'subslot': a.subslot,
'version': a.version,
'slotted_atom': a.slotted_atom,
'versioned_atom': a,
'unversioned_atom': a.unversioned_atom,
}
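# Illustrative sketch (not part of the original module): how the dict above is
# fed to str.format() by the `match` sub-command; the format string here is an
# arbitrary example.
def _example_format_atom(atom):
    return "{key} in slot {slot} from {repository}".format(**AtomFormatDict(atom))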
class PMQueryCommand(ABCObject):
""" A single gentoopmq command. """
@classmethod
def help(self):
"""
Return the help string for a sub-command.
@return: the help string
@rtype: string
"""
descdoc = ' '.join(self.__doc__.split())
descdoc = descdoc[0].lower() + descdoc[1:]
return descdoc.rstrip('.')
def __init__(self, argparser):
"""
Instantiate the subcommand, setting argument parser as necessary.
@param argparser: sub-command argument parser
@type argparser: C{argparse.ArgumentParser}
"""
argparser.set_defaults(instance = self)
self._arg = argparser
@abstractmethod
def __call__(self, pm, args):
"""
Call the sub-command, passing pm (a working PackageManager instance)
and args (the result of argument parsing). Can return exit code
for the process if relevant. If it doesn't, 0 will be used.
@param pm: package manager instance
@type pm: L{PackageManager}
@param args: command arguments
@type args: C{argparse.Namespace}
@return: Process exit code or None if irrelevant
@rtype: bool/None
"""
pass
class PMQueryCommands(object):
""" The container of all standard gentoopmq commands. """
# === generic information ===
class package_manager(PMQueryCommand):
"""
Get the name of a working, preferred PM.
"""
def __init__(self, argparser):
PMQueryCommand.__init__(self, argparser)
argparser.add_argument('-v', '--with-version',
action='store_true', dest='version',
help='Print the version as well')
def __call__(self, pm, args):
if args.version:
print('%s %s' % (pm.name, pm.version))
else:
print(pm.name)
# === repository info ===
class repositories(PMQueryCommand):
"""
Print the list of ebuild repositories.
"""
def __call__(self, pm, args):
print(' '.join([r.name for r in pm.repositories]))
class repo_path(PMQueryCommand):
"""
Print the path to the named repository.
"""
def __init__(self, argparser):
PMQueryCommand.__init__(self, argparser)
argparser.add_argument('repo_name', type=_reponame,
help='The repository name to look up')
def __call__(self, pm, args):
try:
r = pm.repositories[args.repo_name]
except KeyError:
self._arg.error('No repository named %s' % args.repo_name)
return 1
print(r.path)
# === package matching ===
class match(PMQueryCommand):
"""
Print packages matching the specified atom.
"""
def __init__(self, argparser):
PMQueryCommand.__init__(self, argparser)
argparser.add_argument('-b', '--best', action='store_true',
help='Print only the best version')
argparser.add_argument('-s', '--best-in-slot', action='store_true',
help='Print the best version in each available slot')
argparser.add_argument('-f', '--format', default='{versioned_atom}',
help=('Output format string (can include: '
+ '{versioned_atom}, {unversioned_atom}, {slotted_atom}, '
+ '{key}, {key.category}, {key.package}, '
+ '{version}, {version.revision}, {version.without_revision}, '
+ '{slot}, {subslot}, {repository}, {path})'))
argparser.add_argument('package_atom', nargs='+',
help='The package atom to match')
def __call__(self, pm, args):
if args.best and args.best_in_slot:
self._arg.error('--best and --best-in-slot are mutually exclusive')
for in_atom in args.package_atom:
try:
a = pm.Atom(in_atom)
except InvalidAtomStringError as e:
self._arg.error(e)
return 1
pkgs = pm.stack.filter(a)
if args.best_in_slot:
pkgs = [pg.best for pg in pkgs.group_by('slotted_atom')]
if args.best:
try:
pkgs = (pkgs.best,)
except AmbiguousPackageSetError:
self._arg.error('Multiple disjoint packages match %s' % in_atom)
return 1
except EmptyPackageSetError:
self._arg.error('No packages match %s' % in_atom)
return 1
for p in pkgs:
print(args.format.format(**AtomFormatDict(p)))
# === shell ===
class shell(PMQueryCommand):
"""
Run a Python shell with current PM selected.
"""
def __call__(self, pm, args):
import gentoopm.filters as f
import gentoopm.matchers as m
our_imports = (
('pm', pm),
('f', f),
('m', m))
welc = '''The following objects have been imported for you:\n'''
welc += '\n'.join(['\t%s: %s' % (key, repr(var))
for key, var in our_imports])
kwargs = {}
try:
from IPython import embed
except ImportError:
try:
from IPython.Shell import IPShellEmbed
except ImportError:
print('For better user experience, install IPython.')
from code import InteractiveConsole
embed = InteractiveConsole({'pm': pm}).interact
kwargs['banner'] = welc
else:
embed = IPShellEmbed()
embed.set_banner(embed.IP.BANNER + '\n\n' + welc)
else:
kwargs['banner2'] = welc
embed(**kwargs)
def __iter__(self):
for k in dir(self):
if k.startswith('_'):
continue
cls = getattr(self, k)
yield (k.replace('_', '-'), cls.help(), cls)
class PMQueryCLI(object):
""" A CLI for gentoopmq. """
def __init__(self):
self.argparser = arg = argparse.ArgumentParser()
all_pms = frozenset(_supported_pms)
arg.add_argument('-V', '--version',
action='version', version='%s %s' % (arg.prog, PV))
arg.add_argument('-p', '--package-manager',
action='store', help='Use a specific package manager',
choices=all_pms)
subp = arg.add_subparsers(title = 'Sub-commands')
for cmd_name, cmd_help, cmd_class in PMQueryCommands():
p = subp.add_parser(cmd_name, help=cmd_help)
cmd_class(p)
def main(self, argv):
arg = self.argparser
arg.prog = os.path.basename(argv[0])
args = arg.parse_args(argv[1:])
if args.package_manager is not None:
pm = get_pm(args.package_manager)
else:
try:
pm = get_package_manager()
except Exception:
arg.error('No working package manager could be found.')
return args.instance(pm, args) or 0
|
the-stack_0_17937 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Processing phase-difference (aka :abbr:`GRE (gradient-recalled echo)`) fieldmaps.
.. _gre-fieldmaps:
Workflows for processing :abbr:`GRE (gradient recalled echo)` fieldmaps
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Workflows for preparing the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmap
images and cleaning up the fieldmaps created from the phases or phasediff.
"""
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu, fsl, ants
from niflow.nipype1.workflows.dmri.fsl.utils import cleanup_edge_pipeline
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.images import IntraModalMerge
from niworkflows.interfaces.masks import BETRPT
def init_magnitude_wf(omp_nthreads, name='magnitude_wf'):
"""
Prepare the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmaps.
Average (if not done already) the magnitude part of the
:abbr:`GRE (gradient recalled echo)` images, run N4 to
correct for B1 field nonuniformity, and skull-strip the
preprocessed magnitude.
Workflow Graph
.. workflow ::
:graph2use: orig
:simple_form: yes
from sdcflows.workflows.fmap import init_magnitude_wf
wf = init_magnitude_wf(omp_nthreads=6)
Parameters
----------
omp_nthreads : int
Maximum number of threads an individual process may use
name : str
Name of workflow (default: ``prepare_magnitude_w``)
Inputs
------
magnitude : pathlike
Path to the corresponding magnitude path(s).
Outputs
-------
fmap_ref : pathlike
Path to the fieldmap reference calculated in this workflow.
fmap_mask : pathlike
Path to a binary brain mask corresponding to the reference above.
"""
workflow = Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(fields=['magnitude']), name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(fields=['fmap_ref', 'fmap_mask', 'mask_report']),
name='outputnode')
# Merge input magnitude images
magmrg = pe.Node(IntraModalMerge(), name='magmrg')
# de-gradient the fields ("bias/illumination artifact")
n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
name='n4_correct', n_procs=omp_nthreads)
bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True),
name='bet')
workflow.connect([
(inputnode, magmrg, [('magnitude', 'in_files')]),
(magmrg, n4_correct, [('out_file', 'input_image')]),
(n4_correct, bet, [('output_image', 'in_file')]),
(bet, outputnode, [('mask_file', 'fmap_mask'),
('out_file', 'fmap_ref'),
('out_report', 'mask_report')]),
])
return workflow
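# --- Illustrative usage sketch (not part of the original module) ---
# Wiring the magnitude workflow by hand; the input file list is a placeholder.
def _example_run_magnitude_wf(magnitude_files, nthreads=4):
    wf = init_magnitude_wf(omp_nthreads=nthreads)
    wf.inputs.inputnode.magnitude = magnitude_files
    return wf  # call wf.run() to actually execute it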
def init_fmap_postproc_wf(omp_nthreads, fmap_bspline, median_kernel_size=5,
name='fmap_postproc_wf'):
"""
Postprocess a B0 map estimated elsewhere.
This workflow denoises (mostly via smoothing) a B0 fieldmap.
Workflow Graph
.. workflow ::
:graph2use: orig
:simple_form: yes
from sdcflows.workflows.fmap import init_fmap_postproc_wf
wf = init_fmap_postproc_wf(omp_nthreads=6, fmap_bspline=False)
Parameters
----------
omp_nthreads : int
Maximum number of threads an individual process may use
fmap_bspline : bool
Whether the fieldmap should be smoothed and extrapolated to off-brain regions
using B-Spline basis.
median_kernel_size : int
Size of the kernel when smoothing is done with a median filter.
name : str
Name of workflow (default: ``fmap_postproc_wf``)
Inputs
------
fmap_mask : pathlike
A brain binary mask corresponding to this fieldmap.
fmap_ref : pathlike
A preprocessed magnitude/reference image for the fieldmap.
fmap : pathlike
A B0-field nonuniformity map (aka fieldmap) estimated elsewhere.
Outputs
-------
out_fmap : pathlike
Postprocessed fieldmap.
"""
workflow = Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(
fields=['fmap_mask', 'fmap_ref', 'fmap', 'metadata']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_fmap', 'metadata']),
name='outputnode')
if fmap_bspline:
from ..interfaces.fmap import FieldEnhance
# despike_threshold=1.0, mask_erode=1),
fmapenh = pe.Node(
FieldEnhance(unwrap=False, despike=False),
name='fmapenh', mem_gb=4, n_procs=omp_nthreads)
workflow.connect([
(inputnode, fmapenh, [('fmap_mask', 'in_mask'),
('fmap_ref', 'in_magnitude'),
                                  (('fmap', _pop), 'in_file')]),
(fmapenh, outputnode, [('out_file', 'out_fmap')]),
])
else:
recenter = pe.Node(niu.Function(function=_recenter),
name='recenter', run_without_submitting=True)
denoise = pe.Node(fsl.SpatialFilter(
operation='median', kernel_shape='sphere',
kernel_size=median_kernel_size), name='denoise')
demean = pe.Node(niu.Function(function=_demean), name='demean')
cleanup_wf = cleanup_edge_pipeline(name="cleanup_wf")
workflow.connect([
(inputnode, cleanup_wf, [('fmap_mask', 'inputnode.in_mask')]),
(inputnode, recenter, [(('fmap', _pop), 'in_file')]),
(recenter, denoise, [('out', 'in_file')]),
(denoise, demean, [('out_file', 'in_file')]),
(demean, cleanup_wf, [('out', 'inputnode.in_file')]),
(cleanup_wf, outputnode, [('outputnode.out_file', 'out_fmap')]),
(inputnode, outputnode, [(('metadata', _pop), 'metadata')]),
])
return workflow
def _recenter(in_file):
"""Recenter the phase-map distribution to the -pi..pi range."""
from os import getcwd
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
nii = nb.load(in_file)
data = nii.get_fdata(dtype='float32')
msk = data != 0
msk[data == 0] = False
data[msk] -= np.median(data[msk])
out_file = fname_presuffix(in_file, suffix='_recentered',
newpath=getcwd())
nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_file)
return out_file
def _demean(in_file, in_mask=None, usemode=True):
"""
    Subtract the median (since it is more robust than the mean) from a map.
Parameters
----------
usemode : bool
Use the mode instead of the median (should be even more robust
against outliers).
"""
from os import getcwd
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
nii = nb.load(in_file)
data = nii.get_fdata(dtype='float32')
msk = np.ones_like(data, dtype=bool)
if in_mask is not None:
msk[nb.load(in_mask).get_fdata(dtype='float32') < 1e-4] = False
if usemode:
from scipy.stats import mode
data[msk] -= mode(data[msk], axis=None)[0][0]
else:
data[msk] -= np.median(data[msk], axis=None)
out_file = fname_presuffix(in_file, suffix='_demean',
newpath=getcwd())
nb.Nifti1Image(data, nii.affine, nii.header).to_filename(out_file)
return out_file
def _pop(inlist):
if isinstance(inlist, (tuple, list)):
return inlist[0]
return inlist
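# --- Illustrative sketch (not part of the original module) ---
# Exercises the two helpers above on a small synthetic fieldmap written to
# disk; the values and the temporary-path handling are for illustration only.
def _example_recenter_and_demean(tmp_dir):
    import os.path
    import numpy as np
    import nibabel as nb
    data = np.random.default_rng(0).normal(10.0, 2.0, size=(4, 4, 4)).astype('float32')
    in_file = os.path.join(str(tmp_dir), 'fmap.nii.gz')
    nb.Nifti1Image(data, np.eye(4)).to_filename(in_file)
    recentered = _recenter(in_file)   # median shifted to zero
    return _demean(recentered)        # mode (or median) removed within the mask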
|
the-stack_0_17940 | import os
import time
import sys
import glob
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.runnner import Runner
from experiments.util import plotting_util, util
def get_script_path():
"""
:return: the script path
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = get_script_path()
return script_dir
def default_config_path() -> str:
"""
:return: the default path to configuration file
"""
config_path = os.path.join(default_output_dir(), './config.json')
return config_path
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
q_agent_config = QAgentConfig(gamma=0.999, alpha=0.0005, epsilon=1, render=False, eval_sleep=0.9,
min_epsilon=0.01, eval_episodes=100, train_log_frequency=100,
epsilon_decay=0.9999, video=True, eval_log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/results/videos", num_episodes=20001,
eval_render=False, gifs=True, gif_dir=default_output_dir() + "/results/gifs",
eval_frequency=1000, attacker=True, defender=False, video_frequency=101,
save_dir=default_output_dir() + "/results/data")
env_name = "idsgame-random_defense-v3"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.TABULAR_Q_AGENT.value,
mode=RunnerMode.TRAIN_ATTACKER.value,
q_agent_config=q_agent_config, output_dir=default_output_dir(),
title="TrainingQAgent vs RandomDefender",
random_seeds=[0, 999, 299, 399, 499], run_many=True
)
return client_config
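# --- Illustrative sketch (not part of the original script) ---
# Shows how the default configuration could be tweaked in code before running
# a single experiment; the overridden values are arbitrary examples.
def example_quick_config() -> ClientConfig:
    config = default_config()
    config.q_agent_config.num_episodes = 101
    config.q_agent_config.eval_frequency = 50
    config.run_many = False
    return config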
def write_default_config(path:str = None) -> None:
"""
Writes the default configuration to a json file
:param path: the path to write the configuration to
:return: None
"""
if path is None:
path = default_config_path()
config = default_config()
util.write_config_file(config, path)
def plot_csv(config: ClientConfig, eval_csv_path:str, train_csv_path: str, attack_stats_csv_path : str = None,
random_seed : int = 0) -> None:
"""
Plot results from csv files
:param config: client config
:param eval_csv_path: path to the csv file with evaluation results
:param train_csv_path: path to the csv file with training results
:param random_seed: the random seed of the experiment
:param attack_stats_csv_path: path to attack stats
:return: None
"""
plotting_util.read_and_plot_results(train_csv_path, eval_csv_path,
config.q_agent_config.train_log_frequency,
config.q_agent_config.eval_frequency, config.q_agent_config.eval_log_frequency,
config.q_agent_config.eval_episodes, config.output_dir, sim=False,
random_seed = random_seed, attack_stats_csv_path = attack_stats_csv_path)
def plot_average_results(experiment_title :str, config: ClientConfig, eval_csv_paths:list,
train_csv_paths: str) -> None:
"""
Plots average results after training with different seeds
:param experiment_title: title of the experiment
:param config: experiment config
:param eval_csv_paths: paths to csv files with evaluation data
:param train_csv_paths: path to csv files with training data
:return: None
"""
plotting_util.read_and_plot_average_results(experiment_title, train_csv_paths, eval_csv_paths,
config.q_agent_config.train_log_frequency,
config.q_agent_config.eval_frequency,
config.output_dir)
def run_experiment(configpath: str, random_seed: int, noconfig: bool):
"""
Runs one experiment and saves results and plots
:param configpath: path to experiment config file
:param noconfig: whether to override config
:return: (train_csv_path, eval_csv_path)
"""
if configpath is not None and not noconfig:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
time_str = str(time.time())
util.create_artefact_dirs(config.output_dir, random_seed)
logger = util.setup_logger("tabular_q_learning_vs_random_defense-v3", config.output_dir + "/results/logs/" +
str(random_seed) + "/",
time_str=time_str)
config.q_agent_config.save_dir = default_output_dir() + "/results/data/" + str(random_seed) + "/"
config.q_agent_config.video_dir= default_output_dir() + "/results/videos/" + str(random_seed) + "/"
config.q_agent_config.gif_dir= default_output_dir() + "/results/gifs/" + str(random_seed) + "/"
config.logger = logger
config.q_agent_config.logger = logger
config.q_agent_config.random_seed = random_seed
config.random_seed = random_seed
config.q_agent_config.to_csv(config.output_dir + "/results/hyperparameters/" + str(random_seed) + "/" + time_str + ".csv")
train_result, eval_result = Runner.run(config)
train_csv_path = ""
eval_csv_path = ""
if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
train_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_train" + ".csv"
train_result.to_csv(train_csv_path)
eval_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_eval" + ".csv"
eval_result.to_csv(eval_csv_path)
        plot_csv(config, eval_csv_path, train_csv_path, random_seed=random_seed)
return train_csv_path, eval_csv_path
# Program entrypoint
if __name__ == '__main__':
args = util.parse_args(default_config_path())
experiment_title = "Q-learning vs random defense"
if args.configpath is not None and not args.noconfig:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
if args.plotonly:
base_dir = default_output_dir() + "/results/data/"
train_csv_paths = []
eval_csv_paths = []
if config.run_many:
for seed in config.random_seeds:
train_csv_path = glob.glob(base_dir + str(seed) + "/*_train.csv")[0]
eval_csv_path = glob.glob(base_dir + str(seed) + "/*_eval.csv")[0]
attack_stats_csv_path = None
try:
attack_stats_csv_paths = glob.glob(base_dir + str(seed) + "/attack_stats_*.csv")
                    attack_stats_csv_path = list(filter(lambda x: "checkpoint" not in x, attack_stats_csv_paths))[0]
except:
pass
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, attack_stats_csv_path, random_seed=seed)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
else:
train_csv_path = glob.glob(base_dir + str(config.random_seed) + "/*_train.csv")[0]
eval_csv_path = glob.glob(base_dir + str(config.random_seed) + "/*_eval.csv")[0]
attack_stats_csv_path = None
try:
attack_stats_csv_paths = glob.glob(base_dir + str(config.random_seed) + "/attack_stats_*.csv")
                attack_stats_csv_path = \
                    list(filter(lambda x: "checkpoint" not in x, attack_stats_csv_paths))[0]
except:
pass
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, attack_stats_csv_path=attack_stats_csv_path,
random_seed=config.random_seed)
else:
if not config.run_many:
run_experiment(args.configpath, 0, args.noconfig)
else:
train_csv_paths = []
eval_csv_paths = []
for seed in config.random_seeds:
train_csv_path, eval_csv_path = run_experiment(args.configpath, seed, args.noconfig)
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
|
the-stack_0_17942 | import os
from abc import ABC, abstractmethod
from datetime import datetime
from os import path
import torch as th
from tqdm import tqdm
import wandb
from ..controllers import BaseController, DQNController, DRLController, RandomController
from ..encoding import LabelEncoder
from ..envs import SMACEnv
from ..rl.replay import EpisodeReplayBuffer, PrioritizedEpisodeReplayBuffer, Transition
from ..training import TrainingConfig
# th.autograd.set_detect_anomaly(True)
class Runner(ABC):
def __init__(self, trainer: TrainingConfig):
self.training_config = trainer
self.controller: BaseController
@abstractmethod
def make_agent(
self,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
encoding_output_size,
graph_module_sizes,
action_hidden_size,
):
raise NotImplementedError
@abstractmethod
def run(self):
raise NotImplementedError
class SMACRunner(Runner, ABC):
def __init__(self, trainer: TrainingConfig):
super().__init__(trainer)
self.checkpoint_file = path.join(trainer.log_dir, "checkpoint.pth")
resume_run = path.isfile(self.checkpoint_file)
self.step_num = 0
self.episode_num = 0
previous_step_num = 0
if resume_run:
print("Checkpoint file found, loading initial data...")
checkpoint = th.load(self.checkpoint_file)
previous_step_num = checkpoint["total_steps"]
self.episode_num = checkpoint["n_episodes"]
self.step_num += previous_step_num
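            # a checkpoint exists but this run was not marked as resumed: extend the
            # step budget so max_num_steps counts from the checkpointed progress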
if not trainer.resume_run:
trainer.max_num_steps += previous_step_num
if self.step_num >= trainer.max_num_steps:
print("Number of training steps achieved or surpassed. EXITING.")
exit(0)
if not trainer.dry_run:
if not path.exists(trainer.log_dir):
os.makedirs(trainer.log_dir)
args_to_log = trainer.get_loggable_args()
wandb_config = {}
for key in args_to_log:
for subkey in args_to_log[key]:
wandb_config[subkey[0]] = subkey[1]
wandb.init(
project="hcanet",
name=trainer.run_name,
id=trainer.run_name,
dir="/tmp/wandb",
resume=resume_run,
config=wandb_config,
group=trainer.run_prefix,
)
if trainer.episode_priority is None:
self.memory = EpisodeReplayBuffer(trainer.replay_buffer_size)
else:
self.memory = PrioritizedEpisodeReplayBuffer(
trainer.replay_buffer_size,
trainer.replay_buffer_alpha,
)
self.pbar = tqdm(
initial=self.step_num - previous_step_num,
total=trainer.max_num_steps - previous_step_num,
smoothing=0,
)
replay_dir = path.join(trainer.log_dir, "game_replays", trainer.game_name)
self.env = SMACEnv(
map_name=trainer.game_name,
replay_dir=replay_dir,
reward_sparse=trainer.sparse_rewards,
)
env_info = self.env.get_env_info()
self.env.reset()
# this information can only be acquired after the environment is initialized
unit_types = self.env.get_unit_types()
n_agents = env_info["n_agents"]
n_actions = env_info["n_actions"]
# n_agent_features = len(env_info["agent_features"])
# n_enemy_features = len(env_info["enemy_features"])
v2_obs_shape = env_info["obs_shape"]
# get unit types from the environment
# normalize using label encoder
# ignore non-agent unit types
self.node_types = list(LabelEncoder(unit_types).transform(unit_types))
self.node_types = th.tensor(self.node_types[:n_agents], device=trainer.device).int()
agent_types = self.node_types.unique().tolist()
features_by_node_type = [v2_obs_shape] * len(agent_types)
actions_by_node_type = [n_actions] * len(agent_types)
self.controller = self.make_agent(
self.node_types.tolist(),
agent_types,
features_by_node_type,
actions_by_node_type,
trainer.encoding_hidden_size,
trainer.comms_sizes,
trainer.action_hidden_size,
)
if trainer.dry_run:
exit(0)
def sample_from_memory(self):
return (self.memory.sample(self.batch_size) if not self.memory.is_prioritized else
self.memory.sample(self.batch_size, self.replay_buffer_beta))
def maybe_backup_buffer(self):
if (self.step_num % self.replay_buffer_save_interval == 0 and
len(self.memory) >= self.replay_buffer_save_interval):
print("Saving a sample of the replay buffer to file...")
th.save(
self.memory.copy(self.replay_buffer_save_interval),
self.replay_buffer_file,
)
def log_episode(self, things_to_log, prefix="episode"):
# add the prefix to arg names
loggers_poggers = {}
for key in things_to_log:
loggers_poggers[prefix + "/" + key] = things_to_log[key]
wandb.log(loggers_poggers, step=self.step_num)
class OffPolicySMACRunner(SMACRunner):
def make_agent(
self,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
encoding_output_size,
graph_module_sizes,
action_hidden_size,
):
return DQNController(
self.checkpoint_file,
self.training_config.action_module,
self.training_config.policy,
self.training_config.max_num_steps,
self.training_config.batch_size,
self.training_config.optimizer,
self.training_config.lr,
self.training_config.weight_decay,
self.training_config.rmsprop_alpha,
self.training_config.rmsprop_eps,
self.training_config.trr_coef,
self.training_config.checkpoint_save_secs,
self.training_config.graph_layer_type,
self.training_config.share_encoding,
self.training_config.share_comms,
self.training_config.share_action,
self.training_config.full_agent_communication,
self.training_config.full_receptive_field,
self.training_config.gat_n_heads,
self.training_config.gat_average_last,
self.training_config.rgcn_n2_relations,
self.training_config.rgcn_num_bases,
self.training_config.rgcn_fast,
self.training_config.device,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
self.training_config.training_mode,
self.training_config.data_parallel,
self.training_config.act_encoding,
self.training_config.act_comms,
self.training_config.act_action,
self.training_config.use_rnn_encoding,
self.training_config.use_rnn_action,
self.training_config.gamma,
self.training_config.eps_start,
self.training_config.eps_end,
self.training_config.eps_anneal_time,
self.training_config.target_update,
self.training_config.double_dqn,
self.training_config.mixer,
encoding_output_size=encoding_output_size,
graph_module_sizes=graph_module_sizes,
action_hidden_size=action_hidden_size,
)
def run(self):
last_eval = 0
training_start = datetime.now()
while self.step_num < self.training_config.max_num_steps:
step_start = self.step_num
time_start = datetime.now()
episode, episode_reward, info = self.play_episode()
# training_mode is true when not in eval mode
# TODO this variable seems useless
if self.training_config.training_mode:
# Store the transition in memory
self.memory.add(episode)
# self.trainer.maybe_backup_buffer()
# Perform one step of the optimization (on the target network)
if self.memory.can_sample(self.training_config.batch_size):
self.controller.policy_net.train()
self.controller.optimize(self.step_num, self.training_config, self.memory)
self.controller.maybe_save_checkpoint(self.step_num)
# Update the target network, copying all
# weights and biases from the policy network
if self.episode_num % self.training_config.target_update == 0:
self.controller.update_target_net()
things_to_log = {
"episode_reward": episode_reward,
"battles_won": self.env.get_stats()["battles_won"],
"time_secs": (datetime.now() - time_start).total_seconds(),
"num_steps": self.step_num - step_start, }
if "dead_allies" in info:
things_to_log["dead_allies"] = info["dead_allies"]
things_to_log["dead_enemies"] = info["dead_enemies"]
self.log_episode(things_to_log)
# evaluation
# only evaluate if has already been trained
if (self.memory.can_sample(self.training_config.batch_size) and
self.step_num - last_eval >= self.training_config.eval_interval):
last_eval = self.step_num - (self.step_num % self.training_config.eval_interval)
self.evaluate(n_episodes=self.training_config.eval_episodes)
# release GPU cache alongside evaluation
th.cuda.empty_cache()
with open(path.join(self.training_config.log_dir, "run_time.txt"), "a") as f:
f.write(str(datetime.now() - training_start))
self.env.close()
def play_episode(self):
self.env.reset()
current_state = self.env.get_graph_state(
self.node_types,
self.controller.agent_types if self.controller.full_agent_communication else None,
v2=self.training_config.v2_state,
)
self.episode_num += 1
episode_reward = 0
episode_steps = 0
episode = []
done = False
with th.no_grad():
self.controller.policy_net.eval()
self.controller.policy_net.action_layer.init_hidden(1)
while not done:
episode_steps += 1
self.step_num += 1
# I did this and the network learned something
# batch = [t.state.to(self.controller.device) for t in episode] + [current_state]
# batch = Batch.from_data_list(batch)
# q_vals = self.controller.policy_net(batch)
q_vals = self.controller.policy_net(current_state.to(self.training_config.device))
# Select and perform an action
av_actions = self.env.get_avail_actions()
actions = (self.controller.act(q_vals[0], av_actions, self.step_num).detach().cpu())
# if isinstance(self.controller, MultiAgentActorCritic):
# actions = actions[0]
reward, done, info = self.env.step(actions)
if self.training_config.render_eval:
self.env.render()
self.pbar.update()
# observe new state
next_state = (None if done else self.env.get_graph_state(
self.node_types,
self.controller.agent_types if self.controller.full_agent_communication else None,
v2=self.training_config.v2_state,
))
                for key, item in current_state:
                    current_state[key] = item.detach()
# pass everything to CPU for storage
# NOTE I don't know if this actually saves GPU memory
for i, _ in enumerate(av_actions):
av_actions[i] = av_actions[i].cpu()
episode.append(Transition(current_state.cpu(), actions, reward, float(done),
av_actions))
# Move to the next state
current_state = next_state
episode_reward += reward
return episode, episode_reward, info
def evaluate(self, n_episodes=32, close_env=False):
time_start = datetime.now()
battles_won = dead_allies = dead_enemies = eval_reward = 0
for _ in tqdm(range(n_episodes), desc="Ep."):
episode, episode_reward, info = self.play_episode()
eval_reward += episode_reward
if "dead_allies" in info:
dead_allies += info["dead_allies"]
dead_enemies += info["dead_enemies"]
if "battle_won" in info:
battles_won += info["battle_won"]
if self.training_config.save_replays:
self.env.save_replay()
things_to_log = {
"episode_reward": (eval_reward / n_episodes),
"battles_won": battles_won / n_episodes,
"time_secs": (datetime.now() - time_start).total_seconds() / n_episodes, }
if "dead_allies" in info:
things_to_log["dead_allies"] = dead_allies / n_episodes
things_to_log["dead_enemies"] = dead_enemies / n_episodes
self.log_episode(things_to_log, prefix="eval")
if close_env:
self.env.close()
class RandomSMACRunner(SMACRunner):
def make_agent(
self,
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
encoding_hidden_sizes=None,
encoding_output_size=None,
graph_hidden_sizes=None,
graph_output_size=None,
action_hidden_size=None,
):
return RandomController(
node_types,
agent_types,
features_by_node_type,
actions_by_node_type,
self.training_config.device,
)
def run(self):
self.controller.initialize()
while self.step_num < self.training_config.max_num_steps:
step_start = self.step_num
time_start = datetime.now()
self.episode_num += 1
episode_reward = th.zeros(self.controller.n_agents, requires_grad=False)
episode_steps = 0
done = False
while not done:
if (self.training_config.max_steps_episode is not None and
episode_steps >= self.training_config.max_steps_episode):
break
episode_steps += 1
self.step_num += 1
# Select and perform an action
actions = self.controller.act(self.env.get_avail_actions())
reward, done, info = self.env.step(actions)
reward = th.tensor([reward] * self.controller.n_agents, dtype=th.float)
if self.training_config.render_train:
self.env.render()
self.pbar.update()
episode_reward += reward
            things_to_log = {
                "episode_reward": episode_reward.mean().item(),
                "time_secs": (datetime.now() - time_start).total_seconds(),
                "num_steps": self.step_num - step_start, }
            self.log_episode(things_to_log)
if self.step_num < self.training_config.max_num_steps:
self.env.reset()
self.env.close()
if __name__ == "__main__":
trainer = TrainingConfig()
trainer.initialize()
runner: Runner
if trainer.game == TrainingConfig.GameType.SMAC:
# if trainer.action_module in TrainingConfig.OFF_POLICY_METHODS:
runner = OffPolicySMACRunner(trainer)
# elif trainer.action_module == TrainingConfig.ActionModuleType.RANDOM:
# runner = RandomSMACRunner(trainer)
else:
raise ValueError("Game or action module type does not exist")
try:
runner.run()
except (Exception, KeyboardInterrupt) as e:
if isinstance(runner.controller, DRLController):
print("Something happened, saving checkpoint...")
            runner.controller.save_checkpoint(runner.step_num)
if not isinstance(e, KeyboardInterrupt):
with open(path.join(trainer.log_dir, "log.txt"), "a") as f:
import traceback
f.write(str(e))
f.write(traceback.format_exc())
raise e
|
the-stack_0_17944 | import csv
import cv2
import numpy as np
path1='/home/workspace/CarND-Behavioral-Cloning-P3/data/'
path2='/opt/training/'
images = []
measurements = []
def flip(image,measurment):
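    # Mirror the frame horizontally and negate the steering angle so the flipped
    # sample stays consistent with the mirrored road geometry.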
image_flipped = np.fliplr(image)
measurement_flipped = -measurment
images.append(image_flipped)
measurements.append(measurement_flipped)
def load(path):
lines=[]
with open(path+'driving_log.csv') as csvfile:
reader=csv.reader(csvfile)
for line in reader:
lines.append(line)
i=0
for line in lines:
if i==0:
i+=1
continue
center_path = line[0]
left_path = line[1]
right_path = line[2]
filename_center=center_path.split('/')[-1]
filename_left=left_path.split('/')[-1]
filename_right=right_path.split('/')[-1]
path_center = path + 'IMG/' + filename_center
path_left = path + 'IMG/' + filename_left
path_right = path + 'IMG/' + filename_right
image_center = cv2.imread(path_center)
image_left = cv2.imread(path_left)
image_right = cv2.imread(path_right)
measurment_center = float(line[3])
measurment_left = float(line[3]) + 0.25
measurment_right = float(line[3]) - 0.25
images.append(image_center)
images.append(image_left)
images.append(image_right)
measurements.append(measurment_center)
measurements.append(measurment_left)
measurements.append(measurment_right)
# Flip the image to gain more training data
flip(image_center,measurment_center)
flip(image_left,measurment_left)
flip(image_right,measurment_right)
load(path1)
load(path2)
X_train = np.array(images)
y_train = np.array(measurements)
from keras.models import Sequential
from keras.layers import Lambda, Cropping2D, ELU
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5,input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Conv2D(filters=24, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=36, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(units=120, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=84, activation='relu'))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=1))
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True, epochs=3)
model.save('model.h5')
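# Hedged usage sketch (kept as a comment so this training script's behaviour is unchanged):
# once model.h5 has been saved, it can be reloaded elsewhere to predict a steering angle
# for a single 160x320 BGR frame; the image path below is an illustrative placeholder.
#   from keras.models import load_model
#   import cv2
#   model = load_model('model.h5')
#   frame = cv2.imread('IMG/center_sample.jpg')  # must be a 160x320x3 image
#   steering_angle = float(model.predict(frame[None, ...], batch_size=1))
#   print(steering_angle)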
|
the-stack_0_17945 | import os
import re
import sys
from py._io.terminalwriter import get_terminal_width
from . import __version__ as testmynb__version__
from ._ansi import green, red, orange, strip_ansi
class TestHandler:
def __init__(self, *notebooks):
self.notebooks = notebooks
@property
def _summary(self):
notebook_count = len(self.notebooks)
test_count = sum([len(nb.extract_codes()) for nb in self.notebooks])
py_ver = re.sub(r"\s.*", "", sys.version)
header = self._h1_message("Test My Notebook ({})".format(testmynb__version__))
return "{}".format(header) + "\n".join(
[
"Platform {}".format(sys.platform),
"Python {}".format(py_ver),
"Working Directory: {}".format(os.getcwd()),
"",
"{0} test cells across {1} notebook(s) detected.".format(
test_count, notebook_count
),
"",
]
)
@staticmethod
def _h1_message(message):
col = get_terminal_width()
no_formats = strip_ansi(message)
# Remove the ANSI escape codes to check the message length
num_equals = (col - len(no_formats) - 3) // 2
equals_sign = num_equals * "="
return "{1} {0} {1}\n".format(message, equals_sign)
@property
def _notebook_summary_section(self):
section = ["Notebooks:\n"]
for nb in self.notebooks:
trust = green("Trusted") if nb.trusted else red("Untrusted")
string = "{} {}: {}\n".format(trust, nb.name, nb.result)
section.append(string)
section.append("\n")
return "".join(section)
def __call__(self):
failed_or_error = False
output_message = list()
for nb in self.notebooks:
nb()
output_message.append(self._summary)
output_message.append(self._notebook_summary_section)
errors = self.collect_errors()
fails = self.collect_fails()
if fails:
failed_or_error = True
head_message = red(self._h1_message("Failed Test(s)"))
output_message.append(head_message)
for cell, err in fails.items():
string = "---- {}: {} ----\n".format(cell.notebook, cell.name)
output_message.append(string)
output_message.append(str(cell))
output_message.append(
red("\n-----------------------------------------\n")
)
output_message.append(err)
output_message.append("\n\n")
if errors:
failed_or_error = True
head_message = orange(self._h1_message("Errored Test(s)"))
output_message.append(head_message)
for cell, err in errors.items():
string = "---- {}: {} ----\n".format(cell.notebook, cell.name)
output_message.append(string)
output_message.append(str(cell))
output_message.append(
red("\n-----------------------------------------\n")
)
output_message.append(err)
output_message.append("\n\n")
output_message.append(self._final_remarks)
output_message = "".join(output_message)
print(output_message)
if failed_or_error:
sys.exit(1)
@property
def _final_remarks(self):
all_tests = "".join([nb.result for nb in self.notebooks])
passed_test_count = all_tests.count(".")
failed_test_count = all_tests.count("F")
errored_test_count = all_tests.count("E")
passed_text = green("{} test(s) passed".format(passed_test_count))
failed_text = red("{} failed".format(failed_test_count))
error_text = orange(" and {} raised an error".format(errored_test_count))
return self._h1_message(
"{}, {},{}".format(passed_text, failed_text, error_text)
)
def collect_errors(self):
errors = dict()
for nb in self.notebooks:
errors.update(nb.get_error_stack())
return errors
def collect_fails(self):
fails = dict()
for nb in self.notebooks:
fails.update(nb.get_fail_stack())
return fails
def find_notebooks(*args):
notebooks = list()
if len(args):
for path in args:
if os.path.isfile(path):
notebooks.append(path)
elif os.path.isdir(path):
notebooks.extend(_recursive_find_notebooks(path))
else:
notebooks = _recursive_find_notebooks(os.getcwd())
return notebooks
def _recursive_find_notebooks(path):
notebooks = list()
for root, dirs, files in os.walk(path):
for file in files:
if ".ipynb_checkpoints" in root:
continue
if re.match(r"^test_.+\.ipynb", file):
notebooks.append(os.path.join(root, file))
return notebooks
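# Hedged usage sketch: find_notebooks() collects every test_*.ipynb below the given
# directories (or below the current working directory when called with no arguments),
# skipping .ipynb_checkpoints folders. The directory name is an illustrative placeholder.
#   paths = find_notebooks("notebooks/")
#   print(paths)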
|
the-stack_0_17947 | import numpy as np
import os.path as osp
from rllab import spaces
from rllab.envs.base import Env
from rllab.misc.overrides import overrides
from rllab.mujoco_py import MjModel, MjViewer
from rllab.misc import autoargs
from rllab.misc import logger
import theano
import tempfile
import os
import mako.template
import mako.lookup
MODEL_DIR = osp.abspath(
osp.join(
osp.dirname(__file__),
'../../../vendor/mujoco_models'
)
)
BIG = 1e6
class MujocoEnv(Env):
FILE = None
@autoargs.arg('action_noise', type=float,
help='Noise added to the controls, which will be '
'proportional to the action bounds')
def __init__(self, action_noise=0.0, file_path=None, template_args=None):
# compile template
if file_path is None:
if self.__class__.FILE is None:
raise "Mujoco file not specified"
file_path = osp.join(MODEL_DIR, self.__class__.FILE)
if file_path.endswith(".mako"):
lookup = mako.lookup.TemplateLookup(directories=[MODEL_DIR])
with open(file_path) as template_file:
template = mako.template.Template(
template_file.read(), lookup=lookup)
content = template.render(
opts=template_args if template_args is not None else {},
)
tmp_f, file_path = tempfile.mkstemp(text=True)
with open(file_path, 'w') as f:
f.write(content)
self.model = MjModel(file_path)
os.close(tmp_f)
else:
self.model = MjModel(file_path)
self.data = self.model.data
self.viewer = None
self.init_qpos = self.model.data.qpos
self.init_qvel = self.model.data.qvel
self.init_qacc = self.model.data.qacc
self.init_ctrl = self.model.data.ctrl
self.qpos_dim = self.init_qpos.size
self.qvel_dim = self.init_qvel.size
self.ctrl_dim = self.init_ctrl.size
self.action_noise = action_noise
if "frame_skip" in self.model.numeric_names:
frame_skip_id = self.model.numeric_names.index("frame_skip")
addr = self.model.numeric_adr.flat[frame_skip_id]
self.frame_skip = int(self.model.numeric_data.flat[addr])
else:
self.frame_skip = 1
if "init_qpos" in self.model.numeric_names:
init_qpos_id = self.model.numeric_names.index("init_qpos")
addr = self.model.numeric_adr.flat[init_qpos_id]
size = self.model.numeric_size.flat[init_qpos_id]
init_qpos = self.model.numeric_data.flat[addr:addr + size]
self.init_qpos = init_qpos
self.dcom = None
self.current_com = None
self.reset()
super(MujocoEnv, self).__init__()
@property
@overrides
def action_space(self):
bounds = self.model.actuator_ctrlrange
lb = bounds[:, 0]
ub = bounds[:, 1]
return spaces.Box(lb, ub)
@property
@overrides
def observation_space(self):
shp = self.get_current_obs().shape
ub = BIG * np.ones(shp)
return spaces.Box(ub * -1, ub)
@property
def action_bounds(self):
return self.action_space.bounds
def reset_mujoco(self, init_state=None):
if init_state is None:
self.model.data.qpos = self.init_qpos + \
np.random.normal(size=self.init_qpos.shape) * 0.01
self.model.data.qvel = self.init_qvel + \
np.random.normal(size=self.init_qvel.shape) * 0.1
self.model.data.qacc = self.init_qacc
self.model.data.ctrl = self.init_ctrl
else:
start = 0
for datum_name in ["qpos", "qvel", "qacc", "ctrl"]:
datum = getattr(self.model.data, datum_name)
datum_dim = datum.shape[0]
datum = init_state[start: start + datum_dim]
setattr(self.model.data, datum_name, datum)
start += datum_dim
@overrides
def reset(self, init_state=None):
self.reset_mujoco(init_state)
self.model.forward()
self.current_com = self.model.data.com_subtree[0]
self.dcom = np.zeros_like(self.current_com)
return self.get_current_obs()
def get_current_obs(self):
return self._get_full_obs()
def _get_full_obs(self):
data = self.model.data
cdists = np.copy(self.model.geom_margin).flat
for c in self.model.data.contact:
cdists[c.geom2] = min(cdists[c.geom2], c.dist)
obs = np.concatenate([
data.qpos.flat,
data.qvel.flat,
# data.cdof.flat,
data.cinert.flat,
data.cvel.flat,
# data.cacc.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
data.qfrc_constraint.flat,
cdists,
# data.qfrc_bias.flat,
# data.qfrc_passive.flat,
self.dcom.flat,
])
return obs
@property
def _state(self):
return np.concatenate([
self.model.data.qpos.flat,
self.model.data.qvel.flat
])
@property
def _full_state(self):
return np.concatenate([
self.model.data.qpos,
self.model.data.qvel,
self.model.data.qacc,
self.model.data.ctrl,
]).ravel()
def inject_action_noise(self, action):
# generate action noise
noise = self.action_noise * \
np.random.normal(size=action.shape)
# rescale the noise to make it proportional to the action bounds
lb, ub = self.action_bounds
noise = 0.5 * (ub - lb) * noise
return action + noise
def forward_dynamics(self, action):
self.model.data.ctrl = self.inject_action_noise(action)
for _ in range(self.frame_skip):
self.model.step()
self.model.forward()
new_com = self.model.data.com_subtree[0]
self.dcom = new_com - self.current_com
self.current_com = new_com
def get_viewer(self):
if self.viewer is None:
self.viewer = MjViewer()
self.viewer.start()
self.viewer.set_model(self.model)
return self.viewer
def render(self, close=False):
if close:
self.stop_viewer()
else:
#self.get_viewer().render()
self.get_viewer().loop_once()
data, width, height = self.get_viewer().get_image()
            return np.frombuffer(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :]
return None
def start_viewer(self):
viewer = self.get_viewer()
if not viewer.running:
viewer.start()
def stop_viewer(self):
if self.viewer:
self.viewer.finish()
self.viewer = None
def release(self):
# temporarily alleviate the issue (but still some leak)
from rllab.mujoco_py.mjlib import mjlib
mjlib.mj_deleteModel(self.model._wrapped)
mjlib.mj_deleteData(self.data._wrapped)
def get_body_xmat(self, body_name):
idx = self.model.body_names.index(body_name)
return self.model.data.xmat[idx].reshape((3, 3))
def get_body_com(self, body_name):
idx = self.model.body_names.index(body_name)
return self.model.data.com_subtree[idx]
def get_body_comvel(self, body_name):
idx = self.model.body_names.index(body_name)
return self.model.body_comvels[idx]
def print_stats(self):
super(MujocoEnv, self).print_stats()
print("qpos dim:\t%d" % len(self.model.data.qpos))
def action_from_key(self, key):
raise NotImplementedError
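# Hedged sketch of how a concrete environment typically builds on MujocoEnv. The model
# file, body name and reward below are illustrative assumptions, not part of this module.
#   from rllab.envs.base import Step
#   class MyHopperEnv(MujocoEnv):
#       FILE = 'hopper.xml'
#       def step(self, action):
#           self.forward_dynamics(action)
#           next_obs = self.get_current_obs()
#           reward = self.get_body_comvel("torso")[0]  # e.g. forward velocity of a torso body
#           return Step(next_obs, reward, False)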
|
the-stack_0_17950 | from collections import defaultdict, deque
import re
CHALLENGE_DAY = "22"
REAL = open(CHALLENGE_DAY + ".txt").read()
SAMPLE = open(CHALLENGE_DAY + ".sample.txt").read()
SAMPLE_EXPECTED = 306
# SAMPLE_EXPECTED =
def parse_lines(raw):
# Groups.
groups = raw.split("\n\n")
g1 = map(int, groups[0].split("\n")[1:])
q1 = deque(g1)
g2 = map(int, groups[1].split("\n")[1:])
q2 = deque(g2)
return q1, q2
# return list(map(lambda group: group.split("\n"), groups))
# lines = raw.split("\n")
# return lines # raw
# return list(map(lambda l: l.split(" "), lines)) # words.
# return list(map(int, lines))
# return list(map(lambda l: l.strip(), lines)) # beware leading / trailing WS
def solve(raw):
p1, p2 = parse_lines(raw)
# Debug here to make sure parsing is good.
while p1 and p2:
c1 = int(p1.popleft())
c2 = int(p2.popleft())
if c1 > c2:
p1.append(c1)
p1.append(c2)
else:
p2.append(c2)
p2.append(c1)
if p1:
winner = p1
else:
winner = p2
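    # Score the winning deck: the bottom card counts once, the next card twice, and so on.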
ret = 0
val = 1
while winner:
ret += val * winner.pop()
val += 1
return ret
sample = solve(SAMPLE)
if sample != SAMPLE_EXPECTED:
print("SAMPLE FAILED: ", sample, " != ", SAMPLE_EXPECTED)
assert sample == SAMPLE_EXPECTED
print("\n*** SAMPLE PASSED ***\n")
solved = solve(REAL)
print("SOLUTION: ", solved)
import pandas as pd
df=pd.DataFrame([str(solved)])
df.to_clipboard(index=False,header=False)
print("COPIED TO CLIPBOARD")
|
the-stack_0_17951 | import os
import luigi
import uuid
import cluster_tools.utils.volume_utils as vu
from ..graph import GraphWorkflow
from ..cluster_tasks import WorkflowBase
from ..features import EdgeFeaturesWorkflow
from .. import copy_volume as copy_tasks
from . import prediction as predict_tasks
from . import merge_predictions as merge_tasks
from .carving import WriteCarving
class IlastikPredictionWorkflow(WorkflowBase):
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
ilastik_folder = luigi.Parameter()
ilastik_project = luigi.Parameter()
halo = luigi.ListParameter()
n_channels = luigi.IntParameter()
def requires(self):
is_h5 = vu.is_h5(self.output_path)
out_key = None if is_h5 else self.output_key
predict_task = getattr(predict_tasks,
self._get_task_name('Prediction'))
dep = predict_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.input_path,
input_key=self.input_key,
output_path=self.output_path,
output_key=out_key,
ilastik_folder=self.ilastik_folder,
ilastik_project=self.ilastik_project,
halo=self.halo, n_channels=self.n_channels)
        # we only need to merge the predictions separately if the
        # output file is hdf5
if is_h5:
output_prefix = os.path.splitext(self.output_path)[0]
merge_task = getattr(merge_tasks,
self._get_task_name('MergePredictions'))
dep = merge_task(tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir,
dependency=dep,
input_path=self.input_path,
input_key=self.input_key,
tmp_prefix=output_prefix,
output_path=self.output_path,
output_key=self.output_key,
halo=self.halo,
n_channels=self.n_channels)
return dep
@staticmethod
def get_config():
configs = super(IlastikPredictionWorkflow, IlastikPredictionWorkflow).get_config()
configs.update({'prediction':
predict_tasks.PredictionLocal.default_task_config(),
'merge_predictions':
merge_tasks.MergePredictionsLocal.default_task_config()})
return configs
class IlastikCarvingWorkflow(WorkflowBase):
""" Make carving project with watershed and graph
"""
input_path = luigi.Parameter()
input_key = luigi.Parameter()
watershed_path = luigi.Parameter()
watershed_key = luigi.Parameter()
output_path = luigi.Parameter()
copy_inputs = luigi.BoolParameter(default=False)
def requires(self):
tmp_path = os.path.join(self.tmp_folder, 'exp_data.n5')
graph_key = 'graph'
feat_key = 'feats'
# TODO make param ?
max_jobs_merge = 1
dep = GraphWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=self.max_jobs, target=self.target,
dependency=self.dependency,
input_path=self.watershed_path, input_key=self.watershed_key,
graph_path=tmp_path, output_key=graph_key)
dep = EdgeFeaturesWorkflow(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=self.max_jobs, target=self.target, dependency=dep,
input_path=self.input_path, input_key=self.input_key,
labels_path=self.watershed_path,
labels_key=self.watershed_key,
graph_path=tmp_path, graph_key=graph_key,
output_path=tmp_path, output_key=feat_key,
max_jobs_merge=max_jobs_merge)
# write the carving graph data and metadata
uid = str(uuid.uuid1())
dep = WriteCarving(input_path=tmp_path, graph_key=graph_key, features_key=feat_key,
raw_path=self.input_path, raw_key=self.input_key, uid=uid,
output_path=self.output_path, copy_inputs=self.copy_inputs,
dependency=dep)
# TODO
# we need to transpose the data before copying
# that's why return here for now, to do the transposing outside of
# cluster_tools, but should implement it here as well
return dep
copy_task = getattr(copy_tasks, self._get_task_name('CopyVolume'))
# copy the watershed segmentation to ilastik file
ilastik_seg_key = 'preprocessing/graph/labels'
ilastik_seg_dtype = 'uint32' # TODO is uint32 correct ?
dep = copy_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=1, dependency=dep,
input_path=self.watershed_path, input_key=self.watershed_key,
output_path=self.output_path, output_key=ilastik_seg_key,
dtype=ilastik_seg_dtype, prefix='watershed')
# copy the input map to ilastik file
if self.copy_inputs:
ilastik_inp_key = 'Input Data/local_data/%s' % uid
ilastik_inp_dtype = 'float32' # is float32 correct ?
dep = copy_task(tmp_folder=self.tmp_folder, config_dir=self.config_dir,
max_jobs=1, dependency=dep,
input_path=self.input_path, input_key=self.input_key,
output_path=self.output_path, output_key=ilastik_inp_key,
dtype=ilastik_inp_dtype, prefix='inputs')
return dep
@staticmethod
def get_config():
configs = super(IlastikCarvingWorkflow, IlastikCarvingWorkflow).get_config()
configs.update({"copy_volume": copy_tasks.CopyVolumeLocal.default_task_config(),
**EdgeFeaturesWorkflow.get_config(),
**GraphWorkflow.get_config()})
return configs
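# Hedged usage sketch: like other cluster_tools workflows, these tasks are usually driven
# through luigi. All paths, keys and resource settings below are illustrative placeholders.
#   import luigi
#   task = IlastikPredictionWorkflow(
#       tmp_folder='./tmp', config_dir='./configs', max_jobs=8, target='local',
#       input_path='raw.n5', input_key='data',
#       output_path='predictions.n5', output_key='probs',
#       ilastik_folder='/opt/ilastik', ilastik_project='pixel_classifier.ilp',
#       halo=[8, 32, 32], n_channels=2)
#   luigi.build([task], local_scheduler=True)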
|
the-stack_0_17954 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self
"""MXNet symbol frontend."""
from __future__ import absolute_import as _abs
import json
import tvm
from .. import symbol as _sym
from .common import get_nnvm_op, required_attr, parse_tshape, parse_bool_str
__all__ = ['from_mxnet']
def _rename(new_name):
def impl(inputs, attrs):
return get_nnvm_op(new_name)(*inputs, **attrs)
return impl
def _pooling(inputs, attrs):
kernel = parse_tshape(required_attr(attrs, 'kernel', 'pooling'))
if len(kernel) != 2:
raise tvm.error.OpAttributeUnImplemented(
'Non-2D kernels are not supported for Pool2D.')
global_pool = 'global' if parse_bool_str(attrs, 'global_pool') else ''
pool_type = required_attr(attrs, 'pool_type', 'pooling')
if pool_type not in ['avg', 'max']:
raise tvm.error.OpNotImplemented(
'Only max and average pooling are supported in frontend MXNet.')
op_name, new_attrs = '_'.join([global_pool, pool_type, 'pool2d']).strip('_'), {}
# new_attrs['layout'] = 'NCHW'
if not global_pool:
new_attrs['pool_size'] = kernel
new_attrs['strides'] = attrs.get('stride', (1, 1))
new_attrs['padding'] = attrs.get('pad', (0, 0))
new_attrs['ceil_mode'] = (attrs.get('pooling_convention', 'valid') == 'full')
if pool_type == 'avg':
new_attrs['count_include_pad'] = attrs.get('count_include_pad', True)
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _batch_norm(inputs, attrs):
if parse_bool_str(attrs, 'output_mean_var'):
raise tvm.error.OpAttributeUnImplemented(
'Attribute "output_mean_var" is not supported in operator batch_norm.')
# if parse_bool_str(attrs, 'fix_gamma'):
# _warn_not_used('fix_gamma', 'batch_norm')
if parse_bool_str(attrs, 'use_global_stats'):
from warnings import warn
warn(
'Attribute "use_global_stats" is ignored in operator batch_norm.')
# if parse_bool_str(attrs, 'momentum'):
# _warn_not_used('momentum', 'batch_norm')
op_name, new_attrs = 'batch_norm', {}
new_attrs['axis'] = attrs.get('axis', 1)
new_attrs['epsilon'] = attrs.get('eps', 0.001)
new_attrs['center'] = True
new_attrs['scale'] = not parse_bool_str(attrs, 'fix_gamma', default="False")
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _concat(inputs, attrs):
op_name = 'concatenate'
new_attrs = {'axis': attrs.get('dim', 1)}
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _conv2d(inputs, attrs):
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
if len(kernel) != 2:
        raise tvm.error.OpAttributeUnImplemented(
'Non-2D kernels are not supported for operator Conv2D.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
        raise tvm.error.OpAttributeUnImplemented(
'Layout {} is not supported in operator Conv2D.'.format(layout))
if 'kernel_layout' in attrs:
kernel_layout = attrs['kernel_layout']
else:
kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
op_name, new_attrs = 'conv2d', {}
new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d')
new_attrs['kernel_size'] = kernel
new_attrs['strides'] = attrs.get('stride', (1, 1))
new_attrs['padding'] = attrs.get('pad', (0, 0))
new_attrs['dilation'] = attrs.get('dilate', (1, 1))
new_attrs['groups'] = attrs.get('num_group', 1)
new_attrs['layout'] = layout
new_attrs['kernel_layout'] = kernel_layout
new_attrs['use_bias'] = attrs.get('no_bias', 'False').strip() == 'False'
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _conv2d_transpose(inputs, attrs):
if 'target_shape' in attrs:
        raise tvm.error.OpAttributeUnImplemented(
'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
if len(kernel) != 2:
raise tvm.error.OpAttributeInvalid(
'Non-2D kernels are not supported in Conv2D-transpose.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
        raise tvm.error.OpAttributeUnImplemented(
            'Layout {} is not supported in operator Conv2D-transpose.'.format(layout))
if 'kernel_layout' in attrs:
kernel_layout = attrs['kernel_layout']
else:
kernel_layout = 'HWIO' if layout == 'NHWC' else 'OIHW'
op_name, new_attrs = 'conv2d_transpose', {}
new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
new_attrs['kernel_size'] = kernel
new_attrs['strides'] = attrs.get('stride', (1, 1))
new_attrs['output_padding'] = attrs.get('adj', (0, 0))
new_attrs['padding'] = attrs.get('pad', (0, 0))
new_attrs['dilation'] = attrs.get('dilate', (1, 1))
new_attrs['groups'] = attrs.get('num_group', 1)
new_attrs['layout'] = layout
new_attrs['kernel_layout'] = kernel_layout
new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _dense(inputs, attrs):
import mxnet as mx
op_name, new_attrs = 'dense', {}
new_attrs['units'] = required_attr(attrs, 'num_hidden', 'dense')
new_attrs['use_bias'] = not parse_bool_str(attrs, 'no_bias')
try:
_ = mx.sym.FullyConnected(mx.sym.var('x'), num_hidden=1, flatten=True)
has_flatten = True
except mx.base.MXNetError:
# no flatten attribute in old mxnet
has_flatten = False
use_flatten = parse_bool_str(attrs, 'flatten', 'True')
if has_flatten and use_flatten:
inputs[0] = _sym.flatten(inputs[0])
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _dropout(inputs, attrs):
op_name, new_attrs = 'dropout', {}
new_attrs['rate'] = attrs.get('p', 0.5)
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _leaky_relu(inputs, attrs):
act_type = required_attr(attrs, 'act_type', 'leaky_relu')
if act_type in ['leaky', 'prelu']:
op_name, new_attrs = act_type, {}
if act_type == 'leaky':
new_attrs['alpha'] = attrs.get('slope', 0.25)
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
elif act_type == 'elu':
slope = attrs.get('slope', 0.25)
sym = -slope * _sym.relu(1 - _sym.exp(*inputs)) + _sym.relu(*inputs)
elif act_type == 'rrelu':
lower_bound = float(required_attr(attrs, 'lower_bound', 'leaky_relu'))
upper_bound = float(required_attr(attrs, 'upper_bound', 'leaky_relu'))
slope = (lower_bound + upper_bound) / 2.0
op_name, new_attrs = 'leaky_relu', {'alpha': str(slope)}
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(act_type))
return sym
def _activations(inputs, attrs):
act_type = required_attr(attrs, 'act_type', 'activations')
if act_type in ['relu', 'sigmoid', 'tanh']:
op_name, new_attrs = act_type, {}
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
elif act_type == 'softrelu':
sym = _sym.log((1 + _sym.exp(*inputs)))
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(act_type))
return sym
def _reshape(inputs, attrs):
if parse_bool_str(attrs, 'reverse'):
        raise tvm.error.OpAttributeUnImplemented(
'Attribute "reverse" is not supported in operator Reshape.')
op_name, new_attrs = 'reshape', {}
new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _slice(inputs, attrs):
begin = attrs.get('begin', None)
end = attrs.get('end', None)
stride = attrs.get('step', None)
if begin is None or end is None:
raise RuntimeError('begin and end are required params')
if 'None' in begin or 'None' in end:
raise RuntimeError('None in begin or end not supported yet...')
new_attrs = {'begin': begin, 'end': end}
if stride is not None:
new_attrs['stride'] = stride
return get_nnvm_op('strided_slice')(inputs[0], **new_attrs)
def _split(inputs, attrs):
op_name, new_attrs = 'split', {}
axis = attrs.get('axis', 1)
new_attrs['indices_or_sections'] = required_attr(attrs, 'num_outputs', 'split')
new_attrs['axis'] = axis
outputs = get_nnvm_op(op_name)(*inputs, **new_attrs)
if parse_bool_str(attrs, 'squeeze_axis'):
squeeze_attrs = {'axis': axis}
outputs = _sym.Group([get_nnvm_op('squeeze')(o, **squeeze_attrs) for o in outputs])
return outputs
def _softmax_activation(inputs, attrs):
op_name, new_attrs = 'softmax', {}
mode = attrs.get('mode', 'instance')
new_attrs['axis'] = 0 if mode == 'instance' else 1
return get_nnvm_op(op_name)(inputs[0], **new_attrs)
def _softmax_output(inputs, attrs):
op_name, new_attrs = 'softmax', {}
if parse_bool_str(attrs, 'multi_output'):
new_attrs['axis'] = 1
return get_nnvm_op(op_name)(inputs[0], **new_attrs)
def _upsampling(inputs, attrs):
scale = attrs.get('scale')
new_attrs = {'scale':int(scale)}
return get_nnvm_op('upsampling')(inputs[0], **new_attrs)
def _clip(inputs, attrs):
op_name, new_attrs = "clip", {}
new_attrs['a_min'] = required_attr(attrs, 'a_min', 'clip')
new_attrs['a_max'] = required_attr(attrs, 'a_max', 'clip')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _contrib_multibox_detection(inputs, attrs):
clip = parse_bool_str(attrs, 'clip', default='True')
threshold = attrs.get('threshold') or 0.01
nms_threshold = attrs.get('nms_threshold') or 0.5
force_suppress = parse_bool_str(attrs, 'force_suppress', default='False')
variances = tuple([float(x.strip()) for x in attrs.get('variances').strip('()').split(',')]) \
if attrs.get('variances') is not None else (0.1, 0.1, 0.2, 0.2)
nms_topk = attrs.get('nms_topk') or -1
new_attrs0 = {'clip': clip, 'threshold': float(threshold), 'variances': variances}
new_attrs1 = {'return_indices': False, 'iou_threshold': float(nms_threshold),
'force_suppress': force_suppress, 'top_k': int(nms_topk)}
data, valid_count = get_nnvm_op('multibox_transform_loc')(inputs[0], inputs[1],
inputs[2], **new_attrs0)
return get_nnvm_op('non_max_suppression')(data, valid_count, **new_attrs1)
def _elemwise_sum(inputs, _):
new_attrs = {'num_args':len(inputs)}
return get_nnvm_op('elemwise_sum')(*inputs, **new_attrs)
def _crop_like(inputs, attrs):
new_attrs = {}
offsets = \
tuple([float(x.strip()) for x in attrs.get('offsets').strip('()').split(',')]) \
if attrs.get('offsets') is not None else (0, 0)
if offsets != (0, 0):
raise tvm.error.OpAttributeInvalid(
'crop_like offsets must equal (0,0).')
center_crop = parse_bool_str(attrs, 'center_crop', default="False")
if center_crop:
        raise tvm.error.OpAttributeUnImplemented(
'Center crop is not supported in operator crop_like.')
if len(inputs) < 2:
raise tvm.error.OpAttributeUnimplemented("Only support crop_like pattern.")
new_attrs["axis"] = [2, 3]
return get_nnvm_op('slice_like')(inputs[0], inputs[1], **new_attrs)
def _expand_dims(inputs, attrs):
op_name, new_attrs = 'expand_dims', {}
new_attrs['axis'] = required_attr(attrs, 'axis', 'expand_dims')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _lrn(inputs, attrs):
op_name, new_attrs = 'lrn', {}
new_attrs['alpha'] = attrs.get('alpha', 0.0001)
new_attrs['beta'] = attrs.get('beta', 0.75)
new_attrs['bias'] = attrs.get('knorm', 2)
# NCHW format and normalization along channel axis
new_attrs['axis'] = 1
new_attrs['size'] = required_attr(attrs, 'nsize', 'lrn')
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _symbol_ring_buffer(inputs, attrs):
    output = get_nnvm_op('ring_buffer')(*inputs, **attrs)
return _sym._assign(inputs[1], output)
def _copy(inputs, _):
    return get_nnvm_op('copy')(inputs[0], **{})
def _argmax(inputs, attrs):
    return get_nnvm_op('argmax')(*inputs, **attrs)
def _minimum(inputs, attrs):
return get_nnvm_op('broadcast_min')(*inputs, **attrs)
def _maximum(inputs, attrs):
return get_nnvm_op('broadcast_max')(*inputs, **attrs)
def _ones(_, attrs):
op_name = 'ones'
return get_nnvm_op(op_name)(**attrs)
def _zeros(_, attrs):
op_name = 'zeros'
return get_nnvm_op(op_name)(**attrs)
def _argmax(inputs, attrs):
op_name, new_attrs = 'argmax', {}
new_attrs['dtype'] = 'float32'
new_attrs['axis'] = attrs.get('axis', 0)
new_attrs['keepdims'] = parse_bool_str(attrs, 'keepdims', default="False")
return get_nnvm_op(op_name)(*inputs, **new_attrs)
def _argmin(inputs, attrs):
op_name, new_attrs = 'argmin', {}
new_attrs['dtype'] = 'float32'
new_attrs['axis'] = attrs.get('axis', 0)
new_attrs['keepdims'] = parse_bool_str(attrs, 'keepdims', default="False")
return get_nnvm_op(op_name)(*inputs, **new_attrs)
_identity_list = ['__add_scalar__', '__add_symbol__', '__div_scalar__',
'__div_symbol__', '__mul_scalar__', '__mul_symbol__',
'__pow_scalar__', '__rdiv_scalar__', '__rpow_scalar__',
'__rsub_scalar__', '__sub_scalar__', '__sub_symbol__',
'broadcast_add', 'broadcast_div', 'broadcast_mul',
'broadcast_sub', 'broadcast_to', 'cast', 'elemwise_add',
'elemwise_div', 'elemwise_mul', 'elemwise_sub', 'exp',
'flatten', 'log', 'log_softmax', 'max', 'min', 'negative',
'ones_like', 'relu', 'sigmoid', 'slice_like', 'softmax',
'sum', 'tanh', 'transpose', 'zeros_like', 'gather_nd',
'reshape_like', 'where']
_convert_map = {
'_copy' : _rename('copy'),
'_div_scalar' : _rename('__div_scalar__'),
'_minus_scalar' : _rename('__sub_scalar__'),
'_mul_scalar' : _rename('__mul_scalar__'),
'_plus_scalar' : _rename('__add_scalar__'),
'_rdiv_scalar' : _rename('__rdiv_scalar__'),
'_rminus_scalar': _rename('__rsub_scalar__'),
'_contrib_MultiBoxPrior' : _rename('multibox_prior'),
'_contrib_MultiBoxDetection' : _contrib_multibox_detection,
'_minimum' : _minimum,
'_maximum' : _maximum,
'_ones' : _ones,
'_zeros' : _zeros,
'argmax' : _argmax,
'argmin' : _argmin,
'Activation' : _activations,
'BatchNorm' : _batch_norm,
'BatchNorm_v1' : _batch_norm,
'Cast' : _rename('cast'),
'Concat' : _concat,
'Convolution' : _conv2d,
'Convolution_v1': _conv2d,
'Crop' : _crop_like,
'Deconvolution' : _conv2d_transpose,
'Dropout' : _dropout,
'Flatten' : _rename('flatten'),
'FullyConnected': _dense,
'LeakyReLU' : _leaky_relu,
'Pooling' : _pooling,
'Pooling_v1' : _pooling,
'Reshape' : _reshape,
'slice' : _slice,
'SliceChannel' : _split,
'split' : _split,
'Softmax' : _rename('softmax'),
'SoftmaxActivation' : _softmax_activation,
'SoftmaxOutput' : _softmax_output,
'add_n' : _elemwise_sum,
'concat' : _concat,
'max_axis' : _rename('max'),
'min_axis' : _rename('min'),
'reshape' : _reshape,
'sum_axis' : _rename('sum'),
'UpSampling' : _upsampling,
'clip' : _clip,
'expand_dims' : _expand_dims,
'LRN' : _lrn,
'ring_buffer' : _symbol_ring_buffer,
'LinearRegressionOutput' : _copy
}
def _convert_symbol(op_name, inputs, attrs,
identity_list=None,
convert_map=None):
"""Convert from mxnet op to nnvm op.
The converter must specify some conversions explicitly to
support gluon format ops such as conv2d...
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
inputs : list of nnvm.Symbol
List of input symbols.
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
    convert_map : dict
        Dict of name : callable, where name is the op's name that
        requires conversion to nnvm, and callable is a function that
        takes (inputs, attrs) and returns the converted nnvm symbol
Returns
-------
sym : nnvm.Symbol
Converted nnvm Symbol
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _convert_map
if op_name in identity_list:
op = get_nnvm_op(op_name)
sym = op(*inputs, **attrs)
elif op_name in convert_map:
sym = convert_map[op_name](inputs, attrs)
else:
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend MXNet.'.format(op_name))
return sym
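# Hedged illustration of the convert_map contract documented above: each value is a
# callable taking (inputs, attrs) and returning an nnvm symbol. 'MyCustomRelu' is a
# made-up op name used purely for illustration.
#   def _my_custom_relu(inputs, attrs):
#       return get_nnvm_op('relu')(*inputs)
#   custom_map = dict(_convert_map)
#   custom_map['MyCustomRelu'] = _my_custom_relu
#   sym = _convert_symbol('MyCustomRelu', child_syms, attrs, convert_map=custom_map)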
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
def _topo_sort(symbol):
"""Sort all symbols in the mxnet graph in topological order.
Parameters
----------
symbol : mxnet.sym.Symbol
    Returns
-------
list
List of mxnet symbol
"""
queue = []
symbol_map = {}
deps = {}
dep_cnts = {}
for s in symbol:
symbol_map[s.attr('name')] = s
queue.append(s)
while queue:
sym = queue.pop(0)
name = sym.attr('name')
childs = sym.get_children()
if childs is None:
dep_cnts[name] = 0
else:
dep_cnts[name] = len({c.attr('name') for c in childs})
for child in childs:
child_name = child.attr('name')
if child_name not in deps:
deps[child_name] = set()
deps[child_name].add(name)
if child_name not in symbol_map:
symbol_map[child_name] = child
queue.append(child)
order = []
while dep_cnts:
remove = []
for name in dep_cnts:
if dep_cnts[name] == 0:
order.append(symbol_map[name])
remove.append(name)
if name in deps:
for other in deps[name]:
dep_cnts[other] -= 1
for name in remove:
del dep_cnts[name]
return order
def _from_mxnet_impl(symbol, graph):
"""Convert mxnet symbol to nnvm implementation.
Reconstruct a nnvm symbol by traversing the mxnet symbol.
Parameters
----------
symbol : mxnet.sym.Symbol
Incompatible symbol from mxnet, sharing similar graph structure.
The op_name and attrs inside are not always compatible.
graph : dict
Reusable nodes are stored in graph.
    Returns
-------
nnvm.sym.Symbol
Converted symbol
"""
def get_node(sym):
name = sym.attr('name')
if name not in graph:
return None
output_index = json.loads(sym.tojson())['heads'][0][1]
return graph[name][output_index]
assert symbol is not None
# Traverse all symbols in topological order
for sym in _topo_sort(symbol):
name = sym.attr('name')
attr = sym.list_attr()
op_name = sym.attr('op_name')
childs = sym.get_children()
if childs is not None:
childs = [get_node(child) for child in childs]
childs = [x for y in childs for x in _as_list(y)]
node = _convert_symbol(op_name, childs, attr)
elif op_name != 'null':
node = _convert_symbol(op_name, [], attr)
else:
node = _sym.Variable(name=name, **attr)
graph[name] = node
nodes = []
for sym in symbol:
node = get_node(sym)
assert node is not None
nodes.append(node)
if len(nodes) > 1:
return _sym.Group(nodes)
return nodes[0]
def from_mxnet(symbol, arg_params=None, aux_params=None):
"""Convert from MXNet's model into compatible NNVM format.
Parameters
----------
symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
MXNet symbol
arg_params : dict of str to mx.NDArray
The argument parameters in mxnet
aux_params : dict of str to mx.NDArray
The auxiliary parameters in mxnet
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.NDArray
The parameter dict to be used by nnvm
"""
try:
import mxnet as mx
except ImportError as e:
raise ImportError('{}. MXNet is required to parse symbols.'.format(e))
if isinstance(symbol, mx.sym.Symbol):
sym = _from_mxnet_impl(symbol, {})
params = {}
arg_params = arg_params if arg_params else {}
aux_params = aux_params if aux_params else {}
for k, v in arg_params.items():
params[k] = tvm.nd.array(v.asnumpy())
for k, v in aux_params.items():
params[k] = tvm.nd.array(v.asnumpy())
elif isinstance(symbol, mx.gluon.HybridBlock):
data = mx.sym.Variable('data')
sym = symbol(data)
sym = _from_mxnet_impl(sym, {})
params = {}
for k, v in symbol.collect_params().items():
params[k] = tvm.nd.array(v.data().asnumpy())
elif isinstance(symbol, mx.gluon.Block):
raise NotImplementedError("Only Hybrid Blocks are supported now.")
else:
msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
raise ValueError(msg)
if isinstance(sym, list):
sym = _sym.Group(sym)
return sym, params
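# Hedged end-to-end sketch: converting a pretrained Gluon block and compiling it with
# nnvm. The model choice, input shape and target are illustrative placeholders; the
# Gluon path of from_mxnet names its input 'data'.
#   import mxnet as mx
#   import nnvm.compiler
#   block = mx.gluon.model_zoo.vision.resnet18_v1(pretrained=True)
#   sym, params = from_mxnet(block)
#   graph, lib, params = nnvm.compiler.build(
#       sym, target='llvm', shape={'data': (1, 3, 224, 224)}, params=params)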
|
the-stack_0_17955 | from __future__ import annotations
import importlib
from typing import (
TYPE_CHECKING,
Optional,
Sequence,
Tuple,
Union,
)
from pandas._config import get_option
from pandas._typing import IndexLabel
from pandas.util._decorators import (
Appender,
Substitution,
)
from pandas.core.dtypes.common import (
is_integer,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
if TYPE_CHECKING:
from pandas import DataFrame
def hist_series(
self,
by=None,
ax=None,
grid: bool = True,
xlabelsize: Optional[int] = None,
xrot: Optional[float] = None,
ylabelsize: Optional[int] = None,
yrot: Optional[float] = None,
figsize: Optional[Tuple[int, int]] = None,
bins: Union[int, Sequence[int]] = 10,
backend: Optional[str] = None,
legend: bool = False,
**kwargs,
):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups.
ax : matplotlib axis object
If not passed, uses gca().
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels.
figsize : tuple, default None
Figure size in inches by default.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
legend : bool, default False
Whether to show the legend.
.. versionadded:: 1.1.0
**kwargs
To be passed to the actual plotting function.
Returns
-------
matplotlib.AxesSubplot
A histogram plot.
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_series(
self,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
legend=legend,
**kwargs,
)
def hist_frame(
data: DataFrame,
column: IndexLabel = None,
by=None,
grid: bool = True,
xlabelsize: Optional[int] = None,
xrot: Optional[float] = None,
ylabelsize: Optional[int] = None,
yrot: Optional[float] = None,
ax=None,
sharex: bool = False,
sharey: bool = False,
figsize: Optional[Tuple[int, int]] = None,
layout: Optional[Tuple[int, int]] = None,
bins: Union[int, Sequence[int]] = 10,
backend: Optional[str] = None,
legend: bool = False,
**kwargs,
):
"""
Make a histogram of the DataFrame's.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : str or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
legend : bool, default False
Whether to show the legend.
.. versionadded:: 1.1.0
**kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
This example draws a histogram based on the length and width of
some animals, displayed in three bins
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_frame(
data,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
legend=legend,
bins=bins,
**kwargs,
)
_boxplot_doc = """
Make a box plot from DataFrame columns.
Make a box-and-whisker plot from DataFrame columns, optionally grouped
by some other columns. A box plot is a method for graphically depicting
groups of numerical data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. By default, they extend no more than
`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest
data point within that interval. Outliers are plotted as separate dots.
For further details see
Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
Parameters
----------
column : str or list of str, optional
Column name or list of names, or vector.
Can be any valid input to :meth:`pandas.DataFrame.groupby`.
by : str or array-like, optional
Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
One box-plot will be done per value of columns in `by`.
ax : object of class matplotlib.axes.Axes, optional
The matplotlib axes to be used by boxplot.
fontsize : float or str
Tick label font size in points or as a string (e.g., `large`).
rot : int or float, default 0
The rotation angle of labels (in degrees)
with respect to the screen coordinate system.
grid : bool, default True
Setting this to True will show the grid.
figsize : A tuple (width, height) in inches
The size of the figure to create in matplotlib.
layout : tuple (rows, columns), optional
For example, (3, 5) will display the subplots
using 3 rows and 5 columns, starting from the top-left.
return_type : {'axes', 'dict', 'both'} or None, default 'axes'
The kind of object to return. The default is ``axes``.
* 'axes' returns the matplotlib axes the boxplot is drawn on.
* 'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot.
* 'both' returns a namedtuple with the axes and dict.
* when grouping with ``by``, a Series mapping columns to
``return_type`` is returned.
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
%(backend)s\
**kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
Returns
-------
result
See Notes.
See Also
--------
Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
-----
The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
* 'both' : a namedtuple with structure (ax, lines)
For data grouped with ``by``, return a Series of the above or a numpy
array:
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
Examples
--------
Boxplots can be created for every column in the dataframe
by ``df.boxplot()`` or indicating the columns to be used:
.. plot::
:context: close-figs
>>> np.random.seed(1234)
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['Col1', 'Col2', 'Col3', 'Col4'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
Boxplots of variables distributions grouped by the values of a third
variable can be created using the option ``by``. For instance:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Col1', 'Col2'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> boxplot = df.boxplot(by='X')
A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
in order to group the data by combination of the variables in the x-axis:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 3),
... columns=['Col1', 'Col2', 'Col3'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
... 'B', 'A', 'B', 'A', 'B'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
The layout of boxplot can be adjusted giving a tuple to ``layout``:
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... layout=(2, 1))
Additional formatting can be done to the boxplot, like suppressing the grid
(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
or changing the fontsize (i.e. ``fontsize=15``):
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
The parameter ``return_type`` can be used to select the type of element
returned by `boxplot`. When ``return_type='axes'`` is selected,
the matplotlib axes on which the boxplot is drawn are returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
>>> type(boxplot)
<class 'matplotlib.axes._subplots.AxesSubplot'>
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type='axes')
>>> type(boxplot)
<class 'pandas.core.series.Series'>
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
_backend_doc = """\
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
"""
_bar_or_line_doc = """
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array_like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each column in turn. For
instance, with ['green','yellow'] each column's %(kind)s will be filled in
green or yellow, alternately. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for
column `a` in green and %(kind)ss for column `b` in red.
.. versionadded:: 1.1.0
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
"""
@Substitution(backend="")
@Appender(_boxplot_doc)
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs,
):
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.boxplot(
data,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
@Substitution(backend=_backend_doc)
@Appender(_boxplot_doc)
def boxplot_frame(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame(
self,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
def boxplot_frame_groupby(
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
sharex=False,
sharey=True,
backend=None,
**kwargs,
):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group.
column : column name or list of names, or vector
Can be any valid input to groupby.
fontsize : int or str
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
The layout of the plot: (rows, columns).
sharex : bool, default False
Whether x-axes will be shared among subplots.
sharey : bool, default True
Whether y-axes will be shared among subplots.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function.
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
or DataFrame.boxplot return value in case ``subplots=False``.
Examples
--------
You can create boxplots for grouped data and show them as separate subplots:
.. plot::
:context: close-figs
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>> grouped = df.groupby(level='lvl1')
>>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10))
The ``subplots=False`` option shows the boxplots in a single figure.
.. plot::
:context: close-figs
>>> grouped.boxplot(subplots=False, rot=45, fontsize=12)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame_groupby(
grouped,
subplots=subplots,
column=column,
fontsize=fontsize,
rot=rot,
grid=grid,
ax=ax,
figsize=figsize,
layout=layout,
sharex=sharex,
sharey=sharey,
**kwargs,
)
class PlotAccessor(PandasObject):
"""
Make plots of Series or DataFrame.
Uses the backend specified by the
option ``plotting.backend``. By default, matplotlib is used.
Parameters
----------
data : Series or DataFrame
The object for which the method is called.
x : label or position, default None
Only used if data is a DataFrame.
y : label, position or list of label, positions, default None
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
The kind of plot to produce:
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot (DataFrame only)
- 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool, default False
Make separate subplots for each column.
sharex : bool, default True if ax is None else False
In case ``subplots=True``, share x axis and set some x axis labels
to invisible; defaults to True if ax is None otherwise False if
an ax is passed in; Be aware, that passing in both an ax and
``sharex=True`` will alter all x axis labels for all axis in a figure.
sharey : bool, default False
In case ``subplots=True``, share y axis and set some y axis labels to invisible.
layout : tuple, optional
(rows, columns) for the layout of subplots.
figsize : a tuple (width, height) in inches
Size of a figure object.
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
grid : bool, default None (matlab style default)
Axis grid lines.
legend : bool or {'reverse'}
Place legend on axis subplots.
style : list or dict
The matplotlib line style per column.
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis.
.. versionchanged:: 0.25.0
logy : bool or 'sym', default False
Use log scaling or symlog scaling on y axis.
.. versionchanged:: 0.25.0
loglog : bool or 'sym', default False
Use log scaling or symlog scaling on both x and y axes.
.. versionchanged:: 0.25.0
xticks : sequence
Values to use for the xticks.
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
Set the x limits of the current axes.
ylim : 2-tuple/list
Set the y limits of the current axes.
xlabel : label, optional
Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the
x-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
ylabel : label, optional
Name to use for the ylabel on y-axis. Default will show no ylabel, or the
y-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
fontsize : int, default None
Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
colorbar : bool, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin'
plots).
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center).
table : bool, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data
will be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a
table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
stacked : bool, default False in line and bar plots, and True in area plot
If True, create stacked plot.
sort_columns : bool, default False
Sort column names to determine plot ordering.
secondary_y : bool or sequence, default False
Whether to plot on the secondary y-axis if a list/tuple, which
columns to plot on secondary y-axis.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
include_bool : bool, default False
If True, boolean values can be plotted.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
If the backend is not the default matplotlib one, the return value
will be the object returned by the backend.
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center)
"""
_common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
_series_kinds = ("pie",)
_dataframe_kinds = ("scatter", "hexbin")
_kind_aliases = {"density": "kde"}
_all_kinds = _common_kinds + _series_kinds + _dataframe_kinds
def __init__(self, data):
self._parent = data
@staticmethod
def _get_call_args(backend_name, data, args, kwargs):
"""
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
"""
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("sort_columns", False),
("xlabel", None),
("ylabel", None),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
f"{name}={repr(value)}" for (name, default), value in zip(arg_def, args)
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {name: value for value, (name, _) in zip(args, arg_def)}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
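# Editor's illustration (not part of pandas): for a DataFrame, a positional
# call such as ``df.plot('a', 'b', 'bar')`` is zipped against ``arg_def``
# above and handled as if it were ``df.plot(x='a', y='b', kind='bar')``;
# for a Series, any positional arguments raise the TypeError built above,
# whose message spells out the equivalent keyword-only call.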
def __call__(self, *args, **kwargs):
plot_backend = _get_plot_backend(kwargs.pop("backend", None))
x, y, kind, kwargs = self._get_call_args(
plot_backend.__name__, self._parent, args, kwargs
)
kind = self._kind_aliases.get(kind, kind)
# when using another backend, get out of the way
if plot_backend.__name__ != "pandas.plotting._matplotlib":
return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
if kind not in self._all_kinds:
raise ValueError(f"{kind} is not a valid plot kind")
# The original data structured can be transformed before passed to the
# backend. For example, for DataFrame is common to set the index as the
# `x` parameter, and return a Series with the parameter `y` as values.
data = self._parent.copy()
if isinstance(data, ABCSeries):
kwargs["reuse_plot"] = True
if kind in self._dataframe_kinds:
if isinstance(data, ABCDataFrame):
return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
else:
raise ValueError(f"plot kind {kind} can only be used for data frames")
elif kind in self._series_kinds:
if isinstance(data, ABCDataFrame):
if y is None and kwargs.get("subplots") is False:
raise ValueError(
f"{kind} requires either y column or 'subplots=True'"
)
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
elif isinstance(data, ABCDataFrame):
data_cols = data.columns
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data_cols[x]
elif not isinstance(data[x], ABCSeries):
raise ValueError("x must be a label or position")
data = data.set_index(x)
if y is not None:
# check if we have y as int or list of ints
int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
int_y_arg = is_integer(y) or int_ylist
if int_y_arg and not data.columns.holds_integer():
y = data_cols[y]
label_kw = kwargs["label"] if "label" in kwargs else False
for kw in ["xerr", "yerr"]:
if kw in kwargs and (
isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
):
try:
kwargs[kw] = data[kwargs[kw]]
except (IndexError, KeyError, TypeError):
pass
# don't overwrite
data = data[y].copy()
if isinstance(data, ABCSeries):
label_name = label_kw or y
data.name = label_name
else:
match = is_list_like(label_kw) and len(label_kw) == len(y)
if label_kw and not match:
raise ValueError(
"label should be list-like and same length as y"
)
label_name = label_kw or data.columns
data.columns = label_name
return plot_backend.plot(data, kind=kind, **kwargs)
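# Editor's illustration (not part of pandas): for a call like
# ``df.plot(x='day', y='sales', kind='line')`` the block above first sets
# 'day' as the index and then narrows ``data`` to the 'sales' column, so
# the matplotlib backend receives a Series named 'sales' plotted against
# the new index.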
__call__.__doc__ = __doc__
@Appender(
"""
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series([1, 3, 2])
>>> s.plot.line()
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
Let's repeat the same example, but specifying colors for
each column (in this case, for each animal).
>>> axes = df.plot.line(
... subplots=True, color={"pig": "pink", "horse": "#742802"}
... )
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
)
@Substitution(kind="line")
@Appender(_bar_or_line_doc)
def line(self, x=None, y=None, **kwargs):
"""
Plot Series or DataFrame as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
"""
return self(kind="line", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Plot stacked bar charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.bar(stacked=True)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
If you don't like the default colours, you can specify how you'd
like each column to be colored.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(
... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}
... )
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def bar(self, x=None, y=None, **kwargs):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="bar", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot stacked barh charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.barh(stacked=True)
We can specify colors for each column
.. plot::
:context: close-figs
>>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, by=None, **kwargs):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of the box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
>>> ax = df.plot.box()
"""
return self(kind="box", by=by, **kwargs)
def hist(self, by=None, bins=10, **kwargs):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we roll a die 6000 times, we expect to get each value around 1000
times. But when we roll two dice and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", by=by, bins=bins, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, x=None, y=None, **kwargs):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind="area", x=x, y=y, **kwargs)
def pie(self, **kwargs):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with information about
each planet's mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(11, 6))
"""
if (
isinstance(self._parent, ABCDataFrame)
and kwargs.get("y", None) is None
and not kwargs.get("subplots", False)
):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", **kwargs)
def scatter(self, x, y, s=None, c=None, **kwargs):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : str, scalar or array_like, optional
The size of each point. Possible values are:
- A string with the name of the column to be used for marker's size.
- A single scalar so all points have the same size.
- A sequence of scalars, which will be used for each point's size
in turn. For instance, when passing [2,14] the point sizes
will alternate between 2 and 14.
.. versionchanged:: 1.1.0
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each point's color in turn. For
instance, with ['green','yellow'] the points will be filled in green or
yellow, alternately.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, specifies values at given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`,
which defaults to NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that the `'observations'` values range from 1 to 5 but the resulting
plot shows values above 25. This is because of the
`reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
"""
if reduce_C_function is not None:
kwargs["reduce_C_function"] = reduce_C_function
if gridsize is not None:
kwargs["gridsize"] = gridsize
return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
_backends = {}
def _find_backend(backend: str):
"""
Find a pandas plotting backend.
Parameters
----------
backend : str
The identifier for the backend. Either an entrypoint item registered
with pkg_resources, or a module name.
Notes
-----
Modifies _backends with imported backends as a side effect.
Returns
-------
types.ModuleType
The imported backend.
"""
import pkg_resources # Delay import for performance.
for entry_point in pkg_resources.iter_entry_points("pandas_plotting_backends"):
if entry_point.name == "matplotlib":
# matplotlib is an optional dependency. When
# missing, this would raise.
continue
_backends[entry_point.name] = entry_point.load()
try:
return _backends[backend]
except KeyError:
# Fall back to unregistered, module name approach.
try:
module = importlib.import_module(backend)
except ImportError:
# We re-raise later on.
pass
else:
if hasattr(module, "plot"):
# Validate that the interface is implemented when the option
# is set, rather than at plot time.
_backends[backend] = module
return module
raise ValueError(
f"Could not find plotting backend '{backend}'. Ensure that you've installed "
f"the package providing the '{backend}' entrypoint, or that the package has a "
"top-level `.plot` method."
)
def _get_plot_backend(backend=None):
"""
Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).
The plotting system of pandas has been using matplotlib, but the idea here
is that it can also work with other third-party backends. In the future,
this function will return the backend from a pandas option, and all the
rest of the code in this file will use the backend specified there for the
plotting.
The backend is imported lazily, as matplotlib is a soft dependency, and
pandas can be used without it being installed.
"""
backend = backend or get_option("plotting.backend")
if backend == "matplotlib":
# Because matplotlib is an optional dependency and first-party backend,
# we need to attempt an import here to raise an ImportError if needed.
try:
import pandas.plotting._matplotlib as module
except ImportError:
raise ImportError(
"matplotlib is required for plotting when the "
'default backend "matplotlib" is selected.'
) from None
_backends["matplotlib"] = module
if backend in _backends:
return _backends[backend]
module = _find_backend(backend)
_backends[backend] = module
return module
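# Editor's usage sketch (not part of pandas). The ``plotting.backend`` option
# and the ``backend=`` keyword are real pandas 1.x API; the module name
# ``my_backend`` below is hypothetical.
#
#   import pandas as pd
#
#   # Select the default matplotlib backend for the whole session.
#   pd.set_option("plotting.backend", "matplotlib")
#
#   # Or resolve a backend per call: the string is looked up by
#   # _get_plot_backend/_find_backend above, either as a registered
#   # "pandas_plotting_backends" entrypoint or as an importable module
#   # exposing a top-level ``plot`` function.
#   pd.DataFrame({"a": [1, 2, 3]}).plot(backend="my_backend")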
|
the-stack_0_17956 | import argparse, os, collections
try:
import fcntl
def flock(f):
fcntl.flock(f, fcntl.LOCK_EX)
def funlock(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
# probably using windows
# rely on opportunistic file writing (hope that scenarios aren't
# added to completed_scenarios.txt at the same time by parallel processes)
# TODO: add support for file locking on windows, e.g., like
# https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s25.html
def flock(f):
pass
def funlock(f):
pass
def iterify(item):
"""Return an iterable for the one or more items passed."""
if isinstance(item, basestring):
i = iter([item])
else:
try:
# check if it's iterable
i = iter(item)
except TypeError:
i = iter([item])
return i
class AddModuleAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
for m in iterify(values):
setattr(namespace, m, True)
class RemoveModuleAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
for m in iterify(values):
setattr(namespace, m, False)
class AddListAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, self.dest) is None:
setattr(namespace, self.dest, list())
getattr(namespace, self.dest).extend(iterify(values))
# define a standard argument parser, which can be used to setup scenarios
# NOTE: you can't safely use default values here, because those end up being
# assigned to cmd_line_args(), and then they override any values set for the
# standard scenarios.
parser = argparse.ArgumentParser(description='Solve one or more Switch-Hawaii scenarios.')
parser.add_argument('--inputs', dest='inputs_dir')
parser.add_argument('--inputs_subdir')
parser.add_argument('--outputs', dest='outputs_dir')
parser.add_argument('--scenario', action=AddListAction, dest='scenario_to_run')
parser.add_argument('--scenarios', action=AddListAction, nargs='+', dest='scenario_to_run')
parser.add_argument('--scenario_name')
parser.add_argument('--exclude', action=AddModuleAction, dest='exclude_module', nargs='+')
parser.add_argument('-n', action=RemoveModuleAction, dest='exclude_module')
parser.add_argument('--include', action=AddModuleAction, dest='include_module', nargs='+')
parser.add_argument('-y', action=AddModuleAction, dest='include_module')
parser.add_argument(action=AddModuleAction, dest='include_module', nargs='*')
def args_dict(*a):
"""call the parser to get the args, then return them as a dictionary, omitting None's'"""
return {k: v for k, v in vars(parser.parse_args(*a)).iteritems() if v is not None}
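# Editor's illustration (not in the original file): with the parser above, a
# command line such as
#   --scenario base --inputs inputs -n demand_response
# comes back from args_dict() as roughly
#   {'scenario_to_run': ['base'], 'inputs_dir': 'inputs', 'demand_response': False}
# since AddListAction accumulates scenario names, RemoveModuleAction sets the
# named module flag to False, and unspecified (None) options are dropped.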
# report current command line arguments for use by various functions
# This is a function instead of a constant, so users can call
# scenarios.parser.add_argument() to add arguments of their own before evaluation
def cmd_line_args():
return args_dict()
def get_required_scenario_names():
"""Return list of names of scenario(s) that were requested or defined from the command line
via --scenario[s] or --scenario_name.
Return an empty list if none were requested/defined."""
a = cmd_line_args()
if "scenario_to_run" in a:
return a["scenario_to_run"]
elif "scenario_name" in a or not os.path.isfile('scenarios_to_run.txt'):
# They have defined one specific scenario on the command line, which is not based on any standard scenario,
# or there are no standard scenarios.
# Return a no-name scenario, which indicates to build the scenario without referring to any standard scenario.
return ['']
else:
# no specific scenarios were requested on the command line; run the standard scenarios instead
return []
def start_next_standard_scenario():
"""find the next scenario definition in 'scenarios_to_run.txt' that isn't reported
as having been completed in 'completed_scenarios.txt'.
Then report it as completed and return the scenario arguments
(including any modifications from the command line)."""
scenarios_list = get_standard_scenarios_dict()
for (s, args) in scenarios_list.iteritems():
if scenario_already_run(s):
continue
else:
return merge_scenarios(args, cmd_line_args())
return None # no more scenarios to run
def get_scenario_args(scenario):
"""Return the arguments for the specified standard scenario, amended with any command-line arguments.
This may also be called with an empty scenario name ('') to define a scenario using only command-line arguments."""
if scenario == '':
return merge_scenarios(cmd_line_args())
else:
scenario_list = get_standard_scenarios_dict()
if scenario not in scenario_list:
raise RuntimeError("Scenario {s} has not been defined.".format(s=scenario))
else:
return merge_scenarios(scenario_list[scenario], cmd_line_args())
def get_standard_scenarios_dict():
"""Return collection of standard scenarios, as defined in scenarios_to_run.txt.
They are returned as an OrderedDict with keys equal to the scenario names and values
that are each a dictionary of arguments for that scenario."""
# note: we read the list from the disk each time so that we get a fresher version
# if the standard list is changed during a long solution effort.
with open('scenarios_to_run.txt', 'r') as f:
# wait for exclusive access to the file (to avoid reading while the file is being changed)
flock(f)
scenarios_list = list(f.read().splitlines()) # note: ignores presence/absence of \n at end of file
funlock(f)
args_list = [args_dict(s.split(' ')) for s in scenarios_list]
return collections.OrderedDict([(s["scenario_name"], s) for s in args_list])
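# Editor's sketch of the expected scenarios_to_run.txt format (hypothetical
# contents). Each line is a space-separated argument string that args_dict()
# can parse and must include --scenario_name, e.g.
#   --scenario_name base --inputs inputs
#   --scenario_name high_oil --inputs inputs_high --outputs outputs_high
# get_standard_scenarios_dict() then returns an OrderedDict keyed by those
# scenario names.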
def merge_scenarios(*scenarios):
# combine scenarios: start with the first and then apply most settings from later ones
# but concatenate "tag" entries and remove "scenario_to_run" entries
d = dict(tag='')
for s in scenarios:
t1 = d["tag"]
t2 = s.get("tag", "")
s["tag"] = t1 + ("" if t1 == "" or t2 == "" else "_") + t2
d.update(s)
if 'scenario_to_run' in d:
del d['scenario_to_run']
return d
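# Editor's illustration (hypothetical values): merging
#   {'scenario_name': 'base', 'tag': 'rps'} and {'inputs_dir': 'inputs', 'tag': 'ev'}
# yields {'scenario_name': 'base', 'inputs_dir': 'inputs', 'tag': 'rps_ev'}:
# later scenarios win for ordinary keys, 'tag' values are concatenated with
# '_', and any 'scenario_to_run' entry is removed from the result.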
def report_completed_scenario(scenario):
scenario_already_run(scenario)
def scenario_already_run(scenario):
"""Add the specified scenario to the list in completed_scenarios.txt.
Return False if it wasn't there already."""
with open('completed_scenarios.txt', 'a+') as f:
# wait for exclusive access to the list (to avoid writing the same scenario twice in a race condition)
flock(f)
# file starts with pointer at end; move to start
f.seek(0, 0)
if scenario + '\n' in f:
already_run = True
else:
already_run = False
# append name to the list (will always go at end, because file was opened in 'a' mode)
f.write(scenario + '\n')
funlock(f)
return already_run
|
the-stack_0_17959 | #!/usr/bin/env python3
import re
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
# Usernames can't be shorter than minlen
if len(username) < minlen:
return False
# Usernames can only use lowercase letters, numbers, dots and underscores
if not re.match('^[a-z0-9._]*$', username):
return False
# Usernames can't begin with a number
if username[0].isnumeric():
return False
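# Usernames can't begin with an underscore or a dot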
if username[0] == "_":
return False
if username[0] == ".":
return False
return True
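# Example checks (editor's addition, not part of the original module):
#   validate_user("blue.kale", 3)   -> True
#   validate_user(".blue.kale", 3)  -> False  (can't start with a dot)
#   validate_user("red_quinoa", 4)  -> True
#   validate_user("_red_quinoa", 4) -> False  (can't start with an underscore)
#   validate_user("Blue.kale", 3)   -> False  (uppercase letters are rejected)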
|
the-stack_0_17961 | # Author: Steven J. Bethard <[email protected]>.
import codecs
import inspect
import os
import shutil
import stat
import sys
import textwrap
import tempfile
import unittest
import argparse
from StringIO import StringIO
class StdIOBuffer(StringIO):
pass
from test import test_support
class TestCase(unittest.TestCase):
def assertEqual(self, obj1, obj2):
if obj1 != obj2:
print('')
print(repr(obj1))
print(repr(obj2))
print(obj1)
print(obj2)
super(TestCase, self).assertEqual(obj1, obj2)
def setUp(self):
# The tests assume that line wrapping occurs at 80 columns, but this
# behaviour can be overridden by setting the COLUMNS environment
# variable. To ensure that this assumption is true, unset COLUMNS.
env = test_support.EnvironmentVarGuard()
env.unset("COLUMNS")
self.addCleanup(env.__exit__)
class TempDirMixin(object):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.old_dir = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_dir)
shutil.rmtree(self.temp_dir, True)
def create_readonly_file(self, filename):
file_path = os.path.join(self.temp_dir, filename)
with open(file_path, 'w') as file:
file.write(filename)
os.chmod(file_path, stat.S_IREAD)
class Sig(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class NS(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
sorted_items = sorted(self.__dict__.items())
kwarg_str = ', '.join(['%s=%r' % tup for tup in sorted_items])
return '%s(%s)' % (type(self).__name__, kwarg_str)
__hash__ = None
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
class ArgumentParserError(Exception):
def __init__(self, message, stdout=None, stderr=None, error_code=None):
Exception.__init__(self, message, stdout, stderr)
self.message = message
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def stderr_to_parser_error(parse_args, *args, **kwargs):
# if this is being called recursively and stderr or stdout is already being
# redirected, simply call the function and let the enclosing function
# catch the exception
if isinstance(sys.stderr, StdIOBuffer) or isinstance(sys.stdout, StdIOBuffer):
return parse_args(*args, **kwargs)
# if this is not being called recursively, redirect stderr and
# use it as the ArgumentParserError message
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StdIOBuffer()
sys.stderr = StdIOBuffer()
try:
try:
result = parse_args(*args, **kwargs)
for key in list(vars(result)):
if getattr(result, key) is sys.stdout:
setattr(result, key, old_stdout)
if getattr(result, key) is sys.stderr:
setattr(result, key, old_stderr)
return result
except SystemExit:
code = sys.exc_info()[1].code
stdout = sys.stdout.getvalue()
stderr = sys.stderr.getvalue()
raise ArgumentParserError("SystemExit", stdout, stderr, code)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class ErrorRaisingArgumentParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
parse_args = super(ErrorRaisingArgumentParser, self).parse_args
return stderr_to_parser_error(parse_args, *args, **kwargs)
def exit(self, *args, **kwargs):
exit = super(ErrorRaisingArgumentParser, self).exit
return stderr_to_parser_error(exit, *args, **kwargs)
def error(self, *args, **kwargs):
error = super(ErrorRaisingArgumentParser, self).error
return stderr_to_parser_error(error, *args, **kwargs)
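# Editor's note (not part of the original test file): because parse_args,
# exit and error are routed through stderr_to_parser_error above, a failing
# parse inside the tests surfaces as an ArgumentParserError carrying the
# captured streams, e.g.
#   parser = ErrorRaisingArgumentParser()
#   parser.add_argument('--foo')
#   parser.parse_args(['--bar'])  # raises ArgumentParserError("SystemExit", stdout, stderr, 2)
# instead of printing usage and exiting the interpreter.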
class ParserTesterMetaclass(type):
"""Adds parser tests using the class attributes.
Classes of this type should specify the following attributes:
argument_signatures -- a list of Sig objects which specify
the signatures of Argument objects to be created
failures -- a list of args lists that should cause the parser
to fail
successes -- a list of (initial_args, options, remaining_args) tuples
where initial_args specifies the string args to be parsed,
options is a dict that should match the vars() of the options
parsed out of initial_args, and remaining_args should be any
remaining unparsed arguments
"""
def __init__(cls, name, bases, bodydict):
if name == 'ParserTestCase':
return
# default parser signature is empty
if not hasattr(cls, 'parser_signature'):
cls.parser_signature = Sig()
if not hasattr(cls, 'parser_class'):
cls.parser_class = ErrorRaisingArgumentParser
# ---------------------------------------
# functions for adding optional arguments
# ---------------------------------------
def no_groups(parser, argument_signatures):
"""Add all arguments directly to the parser"""
for sig in argument_signatures:
parser.add_argument(*sig.args, **sig.kwargs)
def one_group(parser, argument_signatures):
"""Add all arguments under a single group in the parser"""
group = parser.add_argument_group('foo')
for sig in argument_signatures:
group.add_argument(*sig.args, **sig.kwargs)
def many_groups(parser, argument_signatures):
"""Add each argument in its own group to the parser"""
for i, sig in enumerate(argument_signatures):
group = parser.add_argument_group('foo:%i' % i)
group.add_argument(*sig.args, **sig.kwargs)
# --------------------------
# functions for parsing args
# --------------------------
def listargs(parser, args):
"""Parse the args by passing in a list"""
return parser.parse_args(args)
def sysargs(parser, args):
"""Parse the args by defaulting to sys.argv"""
old_sys_argv = sys.argv
sys.argv = [old_sys_argv[0]] + args
try:
return parser.parse_args()
finally:
sys.argv = old_sys_argv
# class that holds the combination of one optional argument
# addition method and one arg parsing method
class AddTests(object):
def __init__(self, tester_cls, add_arguments, parse_args):
self._add_arguments = add_arguments
self._parse_args = parse_args
add_arguments_name = self._add_arguments.__name__
parse_args_name = self._parse_args.__name__
for test_func in [self.test_failures, self.test_successes]:
func_name = test_func.__name__
names = func_name, add_arguments_name, parse_args_name
test_name = '_'.join(names)
def wrapper(self, test_func=test_func):
test_func(self)
try:
wrapper.__name__ = test_name
except TypeError:
pass
setattr(tester_cls, test_name, wrapper)
def _get_parser(self, tester):
args = tester.parser_signature.args
kwargs = tester.parser_signature.kwargs
parser = tester.parser_class(*args, **kwargs)
self._add_arguments(parser, tester.argument_signatures)
return parser
def test_failures(self, tester):
parser = self._get_parser(tester)
for args_str in tester.failures:
args = args_str.split()
raises = tester.assertRaises
raises(ArgumentParserError, parser.parse_args, args)
def test_successes(self, tester):
parser = self._get_parser(tester)
for args, expected_ns in tester.successes:
if isinstance(args, str):
args = args.split()
result_ns = self._parse_args(parser, args)
tester.assertEqual(expected_ns, result_ns)
# add tests for each combination of an optionals adding method
# and an arg parsing method
for add_arguments in [no_groups, one_group, many_groups]:
for parse_args in [listargs, sysargs]:
AddTests(cls, add_arguments, parse_args)
bases = TestCase,
ParserTestCase = ParserTesterMetaclass('ParserTestCase', bases, {})
# ===============
# Optionals tests
# ===============
class TestOptionalsSingleDash(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [Sig('-x')]
failures = ['-x', 'a', '--foo', '-x --foo', '-x -y']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
('-xa', NS(x='a')),
('-x -1', NS(x='-1')),
('-x-1', NS(x='-1')),
]
class TestOptionalsSingleDashCombined(ParserTestCase):
"""Test an Optional with a single-dash option string"""
argument_signatures = [
Sig('-x', action='store_true'),
Sig('-yyy', action='store_const', const=42),
Sig('-z'),
]
failures = ['a', '--foo', '-xa', '-x --foo', '-x -z', '-z -x',
'-yx', '-yz a', '-yyyx', '-yyyza', '-xyza']
successes = [
('', NS(x=False, yyy=None, z=None)),
('-x', NS(x=True, yyy=None, z=None)),
('-za', NS(x=False, yyy=None, z='a')),
('-z a', NS(x=False, yyy=None, z='a')),
('-xza', NS(x=True, yyy=None, z='a')),
('-xz a', NS(x=True, yyy=None, z='a')),
('-x -za', NS(x=True, yyy=None, z='a')),
('-x -z a', NS(x=True, yyy=None, z='a')),
('-y', NS(x=False, yyy=42, z=None)),
('-yyy', NS(x=False, yyy=42, z=None)),
('-x -yyy -za', NS(x=True, yyy=42, z='a')),
('-x -yyy -z a', NS(x=True, yyy=42, z='a')),
]
class TestOptionalsSingleDashLong(ParserTestCase):
"""Test an Optional with a multi-character single-dash option string"""
argument_signatures = [Sig('-foo')]
failures = ['-foo', 'a', '--foo', '-foo --foo', '-foo -y', '-fooa']
successes = [
('', NS(foo=None)),
('-foo a', NS(foo='a')),
('-foo -1', NS(foo='-1')),
('-fo a', NS(foo='a')),
('-f a', NS(foo='a')),
]
class TestOptionalsSingleDashSubsetAmbiguous(ParserTestCase):
"""Test Optionals where option strings are subsets of each other"""
argument_signatures = [Sig('-f'), Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-foo', '-fo', '-foo b', '-foob', '-fooba', '-foora']
successes = [
('', NS(f=None, foobar=None, foorab=None)),
('-f a', NS(f='a', foobar=None, foorab=None)),
('-fa', NS(f='a', foobar=None, foorab=None)),
('-foa', NS(f='oa', foobar=None, foorab=None)),
('-fooa', NS(f='ooa', foobar=None, foorab=None)),
('-foobar a', NS(f=None, foobar='a', foorab=None)),
('-foorab a', NS(f=None, foobar=None, foorab='a')),
]
class TestOptionalsSingleDashAmbiguous(ParserTestCase):
"""Test Optionals that partially match but are not subsets"""
argument_signatures = [Sig('-foobar'), Sig('-foorab')]
failures = ['-f', '-f a', '-fa', '-foa', '-foo', '-fo', '-foo b']
successes = [
('', NS(foobar=None, foorab=None)),
('-foob a', NS(foobar='a', foorab=None)),
('-foor a', NS(foobar=None, foorab='a')),
('-fooba a', NS(foobar='a', foorab=None)),
('-foora a', NS(foobar=None, foorab='a')),
('-foobar a', NS(foobar='a', foorab=None)),
('-foorab a', NS(foobar=None, foorab='a')),
]
class TestOptionalsNumeric(ParserTestCase):
"""Test an Optional with a short opt string"""
argument_signatures = [Sig('-1', dest='one')]
failures = ['-1', 'a', '-1 --foo', '-1 -y', '-1 -1', '-1 -2']
successes = [
('', NS(one=None)),
('-1 a', NS(one='a')),
('-1a', NS(one='a')),
('-1-2', NS(one='-2')),
]
class TestOptionalsDoubleDash(ParserTestCase):
"""Test an Optional with a double-dash option string"""
argument_signatures = [Sig('--foo')]
failures = ['--foo', '-f', '-f a', 'a', '--foo -x', '--foo --bar']
successes = [
('', NS(foo=None)),
('--foo a', NS(foo='a')),
('--foo=a', NS(foo='a')),
('--foo -2.5', NS(foo='-2.5')),
('--foo=-2.5', NS(foo='-2.5')),
]
class TestOptionalsDoubleDashPartialMatch(ParserTestCase):
"""Tests partial matching with a double-dash option string"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--bat'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--ba=4', '--badge 5']
successes = [
('', NS(badger=False, bat=None)),
('--bat X', NS(badger=False, bat='X')),
('--bad', NS(badger=True, bat=None)),
('--badg', NS(badger=True, bat=None)),
('--badge', NS(badger=True, bat=None)),
('--badger', NS(badger=True, bat=None)),
]
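# Roughly: a long option may be abbreviated to any unambiguous prefix, so
# '--bad', '--badg' and '--badge' all resolve to --badger here, while '--b'
# and '--ba' fail because they are prefixes of both --badger and --bat.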
class TestOptionalsDoubleDashPrefixMatch(ParserTestCase):
"""Tests when one double-dash option string is a prefix of another"""
argument_signatures = [
Sig('--badger', action='store_true'),
Sig('--ba'),
]
failures = ['--bar', '--b', '--ba', '--b=2', '--badge 5']
successes = [
('', NS(badger=False, ba=None)),
('--ba X', NS(badger=False, ba='X')),
('--ba=X', NS(badger=False, ba='X')),
('--bad', NS(badger=True, ba=None)),
('--badg', NS(badger=True, ba=None)),
('--badge', NS(badger=True, ba=None)),
('--badger', NS(badger=True, ba=None)),
]
class TestOptionalsSingleDoubleDash(ParserTestCase):
"""Test an Optional with single- and double-dash option strings"""
argument_signatures = [
Sig('-f', action='store_true'),
Sig('--bar'),
Sig('-baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-fbaz', '-bazf', '-b B', 'B']
successes = [
('', NS(f=False, bar=None, baz=None)),
('-f', NS(f=True, bar=None, baz=None)),
('--ba B', NS(f=False, bar='B', baz=None)),
('-f --bar B', NS(f=True, bar='B', baz=None)),
('-f -b', NS(f=True, bar=None, baz=42)),
('-ba -f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixChars(ParserTestCase):
"""Test an Optional with option strings with custom prefixes"""
parser_signature = Sig(prefix_chars='+:/', add_help=False)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz', '-h', '--help', '+h', '::help', '/help']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42)),
]
class TestOptionalsAlternatePrefixCharsAddedHelp(ParserTestCase):
"""When ``-`` not in prefix_chars, default operators created for help
should use the prefix_chars in use rather than - or --
http://bugs.python.org/issue9444"""
parser_signature = Sig(prefix_chars='+:/', add_help=True)
argument_signatures = [
Sig('+f', action='store_true'),
Sig('::bar'),
Sig('/baz', action='store_const', const=42),
]
failures = ['--bar', '-fbar', '-b B', 'B', '-f', '--bar B', '-baz']
successes = [
('', NS(f=False, bar=None, baz=None)),
('+f', NS(f=True, bar=None, baz=None)),
('::ba B', NS(f=False, bar='B', baz=None)),
('+f ::bar B', NS(f=True, bar='B', baz=None)),
('+f /b', NS(f=True, bar=None, baz=42)),
('/ba +f', NS(f=True, bar=None, baz=42))
]
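# Sketch of the behaviour tested above: with add_help=True, argparse builds
# its help option from prefix_chars, preferring '-' when it is available and
# otherwise falling back to the first prefix char (here '+h'/'++help'); see
# also test_help_alternate_prefix_chars further down.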
class TestOptionalsAlternatePrefixCharsMultipleShortArgs(ParserTestCase):
"""Verify that Optionals must be called with their defined prefixes"""
parser_signature = Sig(prefix_chars='+-', add_help=False)
argument_signatures = [
Sig('-x', action='store_true'),
Sig('+y', action='store_true'),
Sig('+z', action='store_true'),
]
failures = ['-w',
'-xyz',
'+x',
'-y',
'+xyz',
]
successes = [
('', NS(x=False, y=False, z=False)),
('-x', NS(x=True, y=False, z=False)),
('+y -x', NS(x=True, y=True, z=False)),
('+yz -x', NS(x=True, y=True, z=True)),
]
class TestOptionalsShortLong(ParserTestCase):
"""Test a combination of single- and double-dash option strings"""
argument_signatures = [
Sig('-v', '--verbose', '-n', '--noisy', action='store_true'),
]
failures = ['--x --verbose', '-N', 'a', '-v x']
successes = [
('', NS(verbose=False)),
('-v', NS(verbose=True)),
('--verbose', NS(verbose=True)),
('-n', NS(verbose=True)),
('--noisy', NS(verbose=True)),
]
class TestOptionalsDest(ParserTestCase):
"""Tests various means of setting destination"""
argument_signatures = [Sig('--foo-bar'), Sig('--baz', dest='zabbaz')]
failures = ['a']
successes = [
('--foo-bar f', NS(foo_bar='f', zabbaz=None)),
('--baz g', NS(foo_bar=None, zabbaz='g')),
('--foo-bar h --baz i', NS(foo_bar='h', zabbaz='i')),
('--baz j --foo-bar k', NS(foo_bar='k', zabbaz='j')),
]
class TestOptionalsDefault(ParserTestCase):
"""Tests specifying a default for an Optional"""
argument_signatures = [Sig('-x'), Sig('-y', default=42)]
failures = ['a']
successes = [
('', NS(x=None, y=42)),
('-xx', NS(x='x', y=42)),
('-yy', NS(x=None, y='y')),
]
class TestOptionalsNargsDefault(ParserTestCase):
"""Tests not specifying the number of args for an Optional"""
argument_signatures = [Sig('-x')]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x='a')),
]
class TestOptionalsNargs1(ParserTestCase):
"""Tests specifying the 1 arg for an Optional"""
argument_signatures = [Sig('-x', nargs=1)]
failures = ['a', '-x']
successes = [
('', NS(x=None)),
('-x a', NS(x=['a'])),
]
class TestOptionalsNargs3(ParserTestCase):
"""Tests specifying the 3 args for an Optional"""
argument_signatures = [Sig('-x', nargs=3)]
failures = ['a', '-x', '-x a', '-x a b', 'a -x', 'a -x b']
successes = [
('', NS(x=None)),
('-x a b c', NS(x=['a', 'b', 'c'])),
]
class TestOptionalsNargsOptional(ParserTestCase):
"""Tests specifying an Optional arg for an Optional"""
argument_signatures = [
Sig('-w', nargs='?'),
Sig('-x', nargs='?', const=42),
Sig('-y', nargs='?', default='spam'),
Sig('-z', nargs='?', type=int, const='42', default='84'),
]
failures = ['2']
successes = [
('', NS(w=None, x=None, y='spam', z=84)),
('-w', NS(w=None, x=None, y='spam', z=84)),
('-w 2', NS(w='2', x=None, y='spam', z=84)),
('-x', NS(w=None, x=42, y='spam', z=84)),
('-x 2', NS(w=None, x='2', y='spam', z=84)),
('-y', NS(w=None, x=None, y=None, z=84)),
('-y 2', NS(w=None, x=None, y='2', z=84)),
('-z', NS(w=None, x=None, y='spam', z=42)),
('-z 2', NS(w=None, x=None, y='spam', z=2)),
]
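# Roughly, for an optional with nargs='?': flag absent -> default, flag given
# with no value -> const, flag given with a value -> that value; string
# defaults and consts are still run through type=, which is why -z above
# yields 84, 42 and the converted int respectively.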
class TestOptionalsNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [
Sig('-x', nargs='*'),
Sig('-y', nargs='*', default='spam'),
]
failures = ['a']
successes = [
('', NS(x=None, y='spam')),
('-x', NS(x=[], y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y', NS(x=None, y=[])),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsNargsOneOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts one or more"""
argument_signatures = [
Sig('-x', nargs='+'),
Sig('-y', nargs='+', default='spam'),
]
failures = ['a', '-x', '-y', 'a -x', 'a -y b']
successes = [
('', NS(x=None, y='spam')),
('-x a', NS(x=['a'], y='spam')),
('-x a b', NS(x=['a', 'b'], y='spam')),
('-y a', NS(x=None, y=['a'])),
('-y a b', NS(x=None, y=['a', 'b'])),
]
class TestOptionalsChoices(ParserTestCase):
"""Tests specifying the choices for an Optional"""
argument_signatures = [
Sig('-f', choices='abc'),
Sig('-g', type=int, choices=range(5))]
failures = ['a', '-f d', '-fad', '-ga', '-g 6']
successes = [
('', NS(f=None, g=None)),
('-f a', NS(f='a', g=None)),
('-f c', NS(f='c', g=None)),
('-g 0', NS(f=None, g=0)),
('-g 03', NS(f=None, g=3)),
('-fb -g4', NS(f='b', g=4)),
]
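# Note: choices are checked after type conversion, so '-g 03' becomes int 3
# (which is in range(5)), while '-g 6' converts fine but fails the choices
# check, and '-f d' fails because 'd' is not in 'abc'.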
class TestOptionalsRequired(ParserTestCase):
"""Tests the an optional action that is required"""
argument_signatures = [
Sig('-x', type=int, required=True),
]
failures = ['a', '']
successes = [
('-x 1', NS(x=1)),
('-x42', NS(x=42)),
]
class TestOptionalsActionStore(ParserTestCase):
"""Tests the store action for an Optional"""
argument_signatures = [Sig('-x', action='store')]
failures = ['a', 'a -x']
successes = [
('', NS(x=None)),
('-xfoo', NS(x='foo')),
]
class TestOptionalsActionStoreConst(ParserTestCase):
"""Tests the store_const action for an Optional"""
argument_signatures = [Sig('-y', action='store_const', const=object)]
failures = ['a']
successes = [
('', NS(y=None)),
('-y', NS(y=object)),
]
class TestOptionalsActionStoreFalse(ParserTestCase):
"""Tests the store_false action for an Optional"""
argument_signatures = [Sig('-z', action='store_false')]
failures = ['a', '-za', '-z a']
successes = [
('', NS(z=True)),
('-z', NS(z=False)),
]
class TestOptionalsActionStoreTrue(ParserTestCase):
"""Tests the store_true action for an Optional"""
argument_signatures = [Sig('--apple', action='store_true')]
failures = ['a', '--apple=b', '--apple b']
successes = [
('', NS(apple=False)),
('--apple', NS(apple=True)),
]
class TestOptionalsActionAppend(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append')]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=None)),
('--baz a', NS(baz=['a'])),
('--baz a --baz b', NS(baz=['a', 'b'])),
]
class TestOptionalsActionAppendWithDefault(ParserTestCase):
"""Tests the append action for an Optional"""
argument_signatures = [Sig('--baz', action='append', default=['X'])]
failures = ['a', '--baz', 'a --baz', '--baz a b']
successes = [
('', NS(baz=['X'])),
('--baz a', NS(baz=['X', 'a'])),
('--baz a --baz b', NS(baz=['X', 'a', 'b'])),
]
class TestOptionalsActionAppendConst(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=None)),
('-b', NS(b=[Exception])),
('-b -cx -b -cyz', NS(b=[Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionAppendConstWithDefault(ParserTestCase):
"""Tests the append_const action for an Optional"""
argument_signatures = [
Sig('-b', action='append_const', const=Exception, default=['X']),
Sig('-c', action='append', dest='b'),
]
failures = ['a', '-c', 'a -c', '-bx', '-b x']
successes = [
('', NS(b=['X'])),
('-b', NS(b=['X', Exception])),
('-b -cx -b -cyz', NS(b=['X', Exception, 'x', Exception, 'yz'])),
]
class TestOptionalsActionCount(ParserTestCase):
"""Tests the count action for an Optional"""
argument_signatures = [Sig('-x', action='count')]
failures = ['a', '-x a', '-x b', '-x a -x b']
successes = [
('', NS(x=None)),
('-x', NS(x=1)),
]
# ================
# Positional tests
# ================
class TestPositionalsNargsNone(ParserTestCase):
"""Test a Positional that doesn't specify nargs"""
argument_signatures = [Sig('foo')]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo='a')),
]
class TestPositionalsNargs1(ParserTestCase):
"""Test a Positional that specifies an nargs of 1"""
argument_signatures = [Sig('foo', nargs=1)]
failures = ['', '-x', 'a b']
successes = [
('a', NS(foo=['a'])),
]
class TestPositionalsNargs2(ParserTestCase):
"""Test a Positional that specifies an nargs of 2"""
argument_signatures = [Sig('foo', nargs=2)]
failures = ['', 'a', '-x', 'a b c']
successes = [
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMore(ParserTestCase):
"""Test a Positional that specifies unlimited nargs"""
argument_signatures = [Sig('foo', nargs='*')]
failures = ['-x']
successes = [
('', NS(foo=[])),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsZeroOrMoreDefault(ParserTestCase):
"""Test a Positional that specifies unlimited nargs and a default"""
argument_signatures = [Sig('foo', nargs='*', default='bar')]
failures = ['-x']
successes = [
('', NS(foo='bar')),
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOneOrMore(ParserTestCase):
"""Test a Positional that specifies one or more nargs"""
argument_signatures = [Sig('foo', nargs='+')]
failures = ['', '-x']
successes = [
('a', NS(foo=['a'])),
('a b', NS(foo=['a', 'b'])),
]
class TestPositionalsNargsOptional(ParserTestCase):
"""Tests an Optional Positional"""
argument_signatures = [Sig('foo', nargs='?')]
failures = ['-x', 'a b']
successes = [
('', NS(foo=None)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalDefault(ParserTestCase):
"""Tests an Optional Positional with a default value"""
argument_signatures = [Sig('foo', nargs='?', default=42)]
failures = ['-x', 'a b']
successes = [
('', NS(foo=42)),
('a', NS(foo='a')),
]
class TestPositionalsNargsOptionalConvertedDefault(ParserTestCase):
"""Tests an Optional Positional with a default value
that needs to be converted to the appropriate type.
"""
argument_signatures = [
Sig('foo', nargs='?', type=int, default='42'),
]
failures = ['-x', 'a b', '1 2']
successes = [
('', NS(foo=42)),
('1', NS(foo=1)),
]
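# When the nargs='?' positional is omitted, the string default '42' is still
# passed through type=int, which is why '' parses to NS(foo=42) rather than
# NS(foo='42').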
class TestPositionalsNargsNoneNone(ParserTestCase):
"""Test two Positionals that don't specify nargs"""
argument_signatures = [Sig('foo'), Sig('bar')]
failures = ['', '-x', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsNone1(ParserTestCase):
"""Test a Positional with no nargs followed by one with 1"""
argument_signatures = [Sig('foo'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a', 'a b c']
successes = [
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargs2None(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar')]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsNoneZeroOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with unlimited"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='*')]
failures = ['', '--foo']
successes = [
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOneOrMore(ParserTestCase):
"""Test a Positional with no nargs followed by one with one or more"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsNoneOptional(ParserTestCase):
"""Test a Positional with no nargs followed by one with an Optional"""
argument_signatures = [Sig('foo'), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo='a', bar=None)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsZeroOrMoreNone(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar')]
failures = ['', '--foo']
successes = [
('a', NS(foo=[], bar='a')),
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOneOrMoreNone(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar='b')),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsOptionalNone(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with none"""
argument_signatures = [Sig('foo', nargs='?', default=42), Sig('bar')]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=42, bar='a')),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargs2ZeroOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with unlimited"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='*')]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a', 'b'], bar=[])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2OneOrMore(ParserTestCase):
"""Test a Positional with 2 nargs followed by one with one or more"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='+')]
failures = ['', '--foo', 'a', 'a b']
successes = [
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargs2Optional(ParserTestCase):
"""Test a Positional with 2 nargs followed by one optional"""
argument_signatures = [Sig('foo', nargs=2), Sig('bar', nargs='?')]
failures = ['', '--foo', 'a', 'a b c d']
successes = [
('a b', NS(foo=['a', 'b'], bar=None)),
('a b c', NS(foo=['a', 'b'], bar='c')),
]
class TestPositionalsNargsZeroOrMore1(ParserTestCase):
"""Test a Positional with unlimited nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='*'), Sig('bar', nargs=1)]
failures = ['', '--foo', ]
successes = [
('a', NS(foo=[], bar=['a'])),
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOneOrMore1(ParserTestCase):
"""Test a Positional with one or more nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='+'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo=['a'], bar=['b'])),
('a b c', NS(foo=['a', 'b'], bar=['c'])),
]
class TestPositionalsNargsOptional1(ParserTestCase):
"""Test a Positional with an Optional nargs followed by one with 1"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs=1)]
failures = ['', '--foo', 'a b c']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
]
class TestPositionalsNargsNoneZeroOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, unlimited nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='*'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=[], baz=['b'])),
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
]
class TestPositionalsNargsNoneOneOrMore1(ParserTestCase):
"""Test three Positionals: no nargs, one or more nargs and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='+'),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a', 'b']
successes = [
('a b c', NS(foo='a', bar=['b'], baz=['c'])),
('a b c d', NS(foo='a', bar=['b', 'c'], baz=['d'])),
]
class TestPositionalsNargsNoneOptional1(ParserTestCase):
"""Test three Positionals: no nargs, optional narg and 1 nargs"""
argument_signatures = [
Sig('foo'),
Sig('bar', nargs='?', default=0.625),
Sig('baz', nargs=1),
]
failures = ['', '--foo', 'a']
successes = [
('a b', NS(foo='a', bar=0.625, baz=['b'])),
('a b c', NS(foo='a', bar='b', baz=['c'])),
]
class TestPositionalsNargsOptionalOptional(ParserTestCase):
"""Test two optional nargs"""
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='?', default=42),
]
failures = ['--foo', 'a b c']
successes = [
('', NS(foo=None, bar=42)),
('a', NS(foo='a', bar=42)),
('a b', NS(foo='a', bar='b')),
]
class TestPositionalsNargsOptionalZeroOrMore(ParserTestCase):
"""Test an Optional narg followed by unlimited nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='*')]
failures = ['--foo']
successes = [
('', NS(foo=None, bar=[])),
('a', NS(foo='a', bar=[])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsNargsOptionalOneOrMore(ParserTestCase):
"""Test an Optional narg followed by one or more nargs"""
argument_signatures = [Sig('foo', nargs='?'), Sig('bar', nargs='+')]
failures = ['', '--foo']
successes = [
('a', NS(foo=None, bar=['a'])),
('a b', NS(foo='a', bar=['b'])),
('a b c', NS(foo='a', bar=['b', 'c'])),
]
class TestPositionalsChoicesString(ParserTestCase):
"""Test a set of single-character choices"""
argument_signatures = [Sig('spam', choices=set('abcdefg'))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('a', NS(spam='a')),
('g', NS(spam='g')),
]
class TestPositionalsChoicesInt(ParserTestCase):
"""Test a set of integer choices"""
argument_signatures = [Sig('spam', type=int, choices=range(20))]
failures = ['', '--foo', 'h', '42', 'ef']
successes = [
('4', NS(spam=4)),
('15', NS(spam=15)),
]
class TestPositionalsActionAppend(ParserTestCase):
"""Test the 'append' action"""
argument_signatures = [
Sig('spam', action='append'),
Sig('spam', action='append', nargs=2),
]
failures = ['', '--foo', 'a', 'a b', 'a b c d']
successes = [
('a b c', NS(spam=['a', ['b', 'c']])),
]
# ========================================
# Combined optionals and positionals tests
# ========================================
class TestOptionalsNumericAndPositionals(ParserTestCase):
"""Tests negative number args when numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-4', dest='y', action='store_true'),
]
failures = ['-2', '-315']
successes = [
('', NS(x=None, y=False)),
('a', NS(x='a', y=False)),
('-4', NS(x=None, y=True)),
('-4 a', NS(x='a', y=True)),
]
class TestOptionalsAlmostNumericAndPositionals(ParserTestCase):
"""Tests negative number args when almost numeric options are present"""
argument_signatures = [
Sig('x', nargs='?'),
Sig('-k4', dest='y', action='store_true'),
]
failures = ['-k3']
successes = [
('', NS(x=None, y=False)),
('-2', NS(x='-2', y=False)),
('a', NS(x='a', y=False)),
('-k4', NS(x=None, y=True)),
('-k4 a', NS(x='a', y=True)),
]
class TestEmptyAndSpaceContainingArguments(ParserTestCase):
argument_signatures = [
Sig('x', nargs='?'),
Sig('-y', '--yyy', dest='y'),
]
failures = ['-y']
successes = [
([''], NS(x='', y=None)),
(['a badger'], NS(x='a badger', y=None)),
(['-a badger'], NS(x='-a badger', y=None)),
(['-y', ''], NS(x=None, y='')),
(['-y', 'a badger'], NS(x=None, y='a badger')),
(['-y', '-a badger'], NS(x=None, y='-a badger')),
(['--yyy=a badger'], NS(x=None, y='a badger')),
(['--yyy=-a badger'], NS(x=None, y='-a badger')),
]
class TestPrefixCharacterOnlyArguments(ParserTestCase):
parser_signature = Sig(prefix_chars='-+')
argument_signatures = [
Sig('-', dest='x', nargs='?', const='badger'),
Sig('+', dest='y', type=int, default=42),
Sig('-+-', dest='z', action='store_true'),
]
failures = ['-y', '+ -']
successes = [
('', NS(x=None, y=42, z=False)),
('-', NS(x='badger', y=42, z=False)),
('- X', NS(x='X', y=42, z=False)),
('+ -3', NS(x=None, y=-3, z=False)),
('-+-', NS(x=None, y=42, z=True)),
('- ===', NS(x='===', y=42, z=False)),
]
class TestNargsZeroOrMore(ParserTestCase):
"""Tests specifying an args for an Optional that accepts zero or more"""
argument_signatures = [Sig('-x', nargs='*'), Sig('y', nargs='*')]
failures = []
successes = [
('', NS(x=None, y=[])),
('-x', NS(x=[], y=[])),
('-x a', NS(x=['a'], y=[])),
('-x a -- b', NS(x=['a'], y=['b'])),
('a', NS(x=None, y=['a'])),
('a -x', NS(x=[], y=['a'])),
('a -x b', NS(x=['b'], y=['a'])),
]
class TestNargsRemainder(ParserTestCase):
"""Tests specifying a positional with nargs=REMAINDER"""
argument_signatures = [Sig('x'), Sig('y', nargs='...'), Sig('-z')]
failures = ['', '-z', '-z Z']
successes = [
('X', NS(x='X', y=[], z=None)),
('-z Z X', NS(x='X', y=[], z='Z')),
('X A B -z Z', NS(x='X', y=['A', 'B', '-z', 'Z'], z=None)),
('X Y --foo', NS(x='X', y=['Y', '--foo'], z=None)),
]
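# Sketch: nargs='...' (argparse.REMAINDER) makes y swallow everything after
# the first positional verbatim, including option-like strings, so
# 'X A B -z Z' leaves z=None and y=['A', 'B', '-z', 'Z']; -z is only parsed
# as an option when it appears before the remainder starts.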
class TestOptionLike(ParserTestCase):
"""Tests options that may or may not be arguments"""
argument_signatures = [
Sig('-x', type=float),
Sig('-3', type=float, dest='y'),
Sig('z', nargs='*'),
]
failures = ['-x', '-y2.5', '-xa', '-x -a',
'-x -3', '-x -3.5', '-3 -3.5',
'-x -2.5', '-x -2.5 a', '-3 -.5',
'a x -1', '-x -1 a', '-3 -1 a']
successes = [
('', NS(x=None, y=None, z=[])),
('-x 2.5', NS(x=2.5, y=None, z=[])),
('-x 2.5 a', NS(x=2.5, y=None, z=['a'])),
('-3.5', NS(x=None, y=0.5, z=[])),
('-3-.5', NS(x=None, y=-0.5, z=[])),
('-3 .5', NS(x=None, y=0.5, z=[])),
('a -3.5', NS(x=None, y=0.5, z=['a'])),
('a', NS(x=None, y=None, z=['a'])),
('a -x 1', NS(x=1.0, y=None, z=['a'])),
('-x 1 a', NS(x=1.0, y=None, z=['a'])),
('-3 1 a', NS(x=None, y=1.0, z=['a'])),
]
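# Because this parser defines an option that looks like a negative number
# ('-3'), argparse no longer treats bare negative numbers as positional
# values, so '-x -2.5' and '-x -1 a' fail while '-3.5' parses as the -3
# option with value .5; contrast TestOptionalsAlmostNumericAndPositionals
# above, where '-2' is accepted as a positional.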
class TestDefaultSuppress(ParserTestCase):
"""Test actions with suppressed defaults"""
argument_signatures = [
Sig('foo', nargs='?', default=argparse.SUPPRESS),
Sig('bar', nargs='*', default=argparse.SUPPRESS),
Sig('--baz', action='store_true', default=argparse.SUPPRESS),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefaultSuppress(ParserTestCase):
"""Test actions with a parser-level default of SUPPRESS"""
parser_signature = Sig(argument_default=argparse.SUPPRESS)
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS()),
('a', NS(foo='a')),
('a b', NS(foo='a', bar=['b'])),
('--baz', NS(baz=True)),
('a --baz', NS(foo='a', baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestParserDefault42(ParserTestCase):
"""Test actions with a parser-level default of 42"""
parser_signature = Sig(argument_default=42, version='1.0')
argument_signatures = [
Sig('foo', nargs='?'),
Sig('bar', nargs='*'),
Sig('--baz', action='store_true'),
]
failures = ['-x']
successes = [
('', NS(foo=42, bar=42, baz=42)),
('a', NS(foo='a', bar=42, baz=42)),
('a b', NS(foo='a', bar=['b'], baz=42)),
('--baz', NS(foo=42, bar=42, baz=True)),
('a --baz', NS(foo='a', bar=42, baz=True)),
('--baz a b', NS(foo='a', bar=['b'], baz=True)),
]
class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFile, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
('recursive', '-a\n'
'A\n'
'@hello'),
('invalid', '@no-such-path\n'),
]
for path, text in file_texts:
file = open(path, 'w')
file.write(text)
file.close()
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('-a'),
Sig('x'),
Sig('y', nargs='+'),
]
failures = ['', '-b', 'X', '@invalid', '@missing']
successes = [
('X Y', NS(a=None, x='X', y=['Y'])),
('X -a A Y Z', NS(a='A', x='X', y=['Y', 'Z'])),
('@hello X', NS(a=None, x='hello world!', y=['X'])),
('X @hello', NS(a=None, x='X', y=['hello world!'])),
('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
]
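# Sketch of fromfile_prefix_chars: an argument '@hello' is replaced by the
# named file's contents, by default one argument per line (so the whole line
# 'hello world!' becomes a single argument), and @-files may themselves
# reference other @-files, as the 'recursive' file does with '@hello'.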
class TestArgumentsFromFileConverter(TempDirMixin, ParserTestCase):
"""Test reading arguments from a file"""
def setUp(self):
super(TestArgumentsFromFileConverter, self).setUp()
file_texts = [
('hello', 'hello world!\n'),
]
for path, text in file_texts:
file = open(path, 'w')
file.write(text)
file.close()
class FromFileConverterArgumentParser(ErrorRaisingArgumentParser):
def convert_arg_line_to_args(self, arg_line):
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
parser_class = FromFileConverterArgumentParser
parser_signature = Sig(fromfile_prefix_chars='@')
argument_signatures = [
Sig('y', nargs='+'),
]
failures = []
successes = [
('@hello X', NS(y=['hello', 'world!', 'X'])),
]
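# Overriding convert_arg_line_to_args to split each line on whitespace makes
# '@hello' expand to ['hello', 'world!'] here, instead of the single
# 'hello world!' argument produced by the default one-arg-per-line behaviour.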
# =====================
# Type conversion tests
# =====================
class TestFileTypeRepr(TestCase):
def test_r(self):
type = argparse.FileType('r')
self.assertEqual("FileType('r')", repr(type))
def test_wb_1(self):
type = argparse.FileType('wb', 1)
self.assertEqual("FileType('wb', 1)", repr(type))
class RFile(object):
seen = {}
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
if other in self.seen:
text = self.seen[other]
else:
text = self.seen[other] = other.read()
other.close()
if not isinstance(text, str):
text = text.decode('ascii')
return self.name == other.name == text
class TestFileTypeR(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeR, self).setUp()
for file_name in ['foo', 'bar']:
file = open(os.path.join(self.temp_dir, file_name), 'w')
file.write(file_name)
file.close()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType()),
Sig('spam', type=argparse.FileType('r')),
]
failures = ['-x', '-x bar', 'non-existent-file.txt']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
('readonly', NS(x=None, spam=RFile('readonly'))),
]
class TestFileTypeRB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
def setUp(self):
super(TestFileTypeRB, self).setUp()
for file_name in ['foo', 'bar']:
file = open(os.path.join(self.temp_dir, file_name), 'w')
file.write(file_name)
file.close()
argument_signatures = [
Sig('-x', type=argparse.FileType('rb')),
Sig('spam', type=argparse.FileType('rb')),
]
failures = ['-x', '-x bar']
successes = [
('foo', NS(x=None, spam=RFile('foo'))),
('-x foo bar', NS(x=RFile('foo'), spam=RFile('bar'))),
('bar -x foo', NS(x=RFile('foo'), spam=RFile('bar'))),
('-x - -', NS(x=sys.stdin, spam=sys.stdin)),
]
class WFile(object):
seen = set()
def __init__(self, name):
self.name = name
__hash__ = None
def __eq__(self, other):
if other not in self.seen:
text = 'Check that file is writable.'
if 'b' in other.mode:
text = text.encode('ascii')
other.write(text)
other.close()
self.seen.add(other)
return self.name == other.name
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"non-root user required")
class TestFileTypeW(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for writing files"""
def setUp(self):
super(TestFileTypeW, self).setUp()
self.create_readonly_file('readonly')
argument_signatures = [
Sig('-x', type=argparse.FileType('w')),
Sig('spam', type=argparse.FileType('w')),
]
    failures = ['-x', '-x bar', 'readonly']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestFileTypeWB(TempDirMixin, ParserTestCase):
argument_signatures = [
Sig('-x', type=argparse.FileType('wb')),
Sig('spam', type=argparse.FileType('wb')),
]
failures = ['-x', '-x bar']
successes = [
('foo', NS(x=None, spam=WFile('foo'))),
('-x foo bar', NS(x=WFile('foo'), spam=WFile('bar'))),
('bar -x foo', NS(x=WFile('foo'), spam=WFile('bar'))),
('-x - -', NS(x=sys.stdout, spam=sys.stdout)),
]
class TestTypeCallable(ParserTestCase):
"""Test some callables as option/argument types"""
argument_signatures = [
Sig('--eggs', type=complex),
Sig('spam', type=float),
]
failures = ['a', '42j', '--eggs a', '--eggs 2i']
successes = [
('--eggs=42 42', NS(eggs=42, spam=42.0)),
('--eggs 2j -- -1.5', NS(eggs=2j, spam=-1.5)),
('1024.675', NS(eggs=None, spam=1024.675)),
]
class TestTypeUserDefined(ParserTestCase):
"""Test a user-defined option/argument type"""
class MyType(TestCase):
def __init__(self, value):
self.value = value
__hash__ = None
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=MyType),
Sig('spam', type=MyType),
]
failures = []
successes = [
('a -x b', NS(x=MyType('b'), spam=MyType('a'))),
('-xf g', NS(x=MyType('f'), spam=MyType('g'))),
]
class TestTypeClassicClass(ParserTestCase):
"""Test a classic class type"""
class C:
def __init__(self, value):
self.value = value
__hash__ = None
def __eq__(self, other):
return (type(self), self.value) == (type(other), other.value)
argument_signatures = [
Sig('-x', type=C),
Sig('spam', type=C),
]
failures = []
successes = [
('a -x b', NS(x=C('b'), spam=C('a'))),
('-xf g', NS(x=C('f'), spam=C('g'))),
]
class TestTypeRegistration(TestCase):
"""Test a user-defined type by registering it"""
def test(self):
def get_my_type(string):
return 'my_type{%s}' % string
parser = argparse.ArgumentParser()
parser.register('type', 'my_type', get_my_type)
parser.add_argument('-x', type='my_type')
parser.add_argument('y', type='my_type')
self.assertEqual(parser.parse_args('1'.split()),
NS(x=None, y='my_type{1}'))
self.assertEqual(parser.parse_args('-x 1 42'.split()),
NS(x='my_type{1}', y='my_type{42}'))
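# Roughly: parser.register('type', 'my_type', get_my_type) stores the
# callable in the parser's registry, so a later add_argument(..., type='my_type')
# looks the converter up by name instead of being given the callable directly.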
# ============
# Action tests
# ============
class TestActionUserDefined(ParserTestCase):
"""Test a user-defined option/argument action"""
class OptionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
# check destination and option string
assert self.dest == 'spam', 'dest: %s' % self.dest
assert option_string == '-s', 'flag: %s' % option_string
# when option is before argument, badger=2, and when
# option is after argument, badger=<whatever was set>
expected_ns = NS(spam=0.25)
if value in [0.125, 0.625]:
expected_ns.badger = 2
elif value in [2.0]:
expected_ns.badger = 84
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('opt_action failed: %s' % e)
setattr(namespace, 'spam', value)
class PositionalAction(argparse.Action):
def __call__(self, parser, namespace, value, option_string=None):
try:
assert option_string is None, ('option_string: %s' %
option_string)
# check destination
assert self.dest == 'badger', 'dest: %s' % self.dest
# when argument is before option, spam=0.25, and when
# option is after argument, spam=<whatever was set>
expected_ns = NS(badger=2)
if value in [42, 84]:
expected_ns.spam = 0.25
elif value in [1]:
expected_ns.spam = 0.625
elif value in [2]:
expected_ns.spam = 0.125
else:
raise AssertionError('value: %s' % value)
assert expected_ns == namespace, ('expected %s, got %s' %
(expected_ns, namespace))
except AssertionError:
e = sys.exc_info()[1]
raise ArgumentParserError('arg_action failed: %s' % e)
setattr(namespace, 'badger', value)
argument_signatures = [
Sig('-s', dest='spam', action=OptionalAction,
type=float, default=0.25),
Sig('badger', action=PositionalAction,
type=int, nargs='?', default=2),
]
failures = []
successes = [
('-s0.125', NS(spam=0.125, badger=2)),
('42', NS(spam=0.25, badger=42)),
('-s 0.625 1', NS(spam=0.625, badger=1)),
('84 -s2', NS(spam=2.0, badger=84)),
]
class TestActionRegistration(TestCase):
"""Test a user-defined action supplied by registering it"""
class MyAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, 'foo[%s]' % values)
def test(self):
parser = argparse.ArgumentParser()
parser.register('action', 'my_action', self.MyAction)
parser.add_argument('badger', action='my_action')
self.assertEqual(parser.parse_args(['1']), NS(badger='foo[1]'))
self.assertEqual(parser.parse_args(['42']), NS(badger='foo[42]'))
# ================
# Subparsers tests
# ================
class TestAddSubparsers(TestCase):
"""Test the add_subparsers method"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def _get_parser(self, subparser_help=False, prefix_chars=None):
# create a parser with a subparsers argument
if prefix_chars:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description', prefix_chars=prefix_chars)
parser.add_argument(
prefix_chars[0] * 2 + 'foo', action='store_true', help='foo help')
else:
parser = ErrorRaisingArgumentParser(
prog='PROG', description='main description')
parser.add_argument(
'--foo', action='store_true', help='foo help')
parser.add_argument(
'bar', type=float, help='bar help')
# check that only one subparsers argument can be added
subparsers = parser.add_subparsers(help='command help')
self.assertArgumentParserError(parser.add_subparsers)
# add first sub-parser
parser1_kwargs = dict(description='1 description')
if subparser_help:
parser1_kwargs['help'] = '1 help'
parser1 = subparsers.add_parser('1', **parser1_kwargs)
parser1.add_argument('-w', type=int, help='w help')
parser1.add_argument('x', choices='abc', help='x help')
# add second sub-parser
parser2_kwargs = dict(description='2 description')
if subparser_help:
parser2_kwargs['help'] = '2 help'
parser2 = subparsers.add_parser('2', **parser2_kwargs)
parser2.add_argument('-y', choices='123', help='y help')
parser2.add_argument('z', type=complex, nargs='*', help='z help')
# return the main parser
return parser
def setUp(self):
super(TestAddSubparsers, self).setUp()
self.parser = self._get_parser()
self.command_help_parser = self._get_parser(subparser_help=True)
def test_parse_args_failures(self):
# check some failure cases:
for args_str in ['', 'a', 'a a', '0.5 a', '0.5 1',
'0.5 1 -y', '0.5 2 -w']:
args = args_str.split()
self.assertArgumentParserError(self.parser.parse_args, args)
def test_parse_args(self):
# check some non-failure cases:
self.assertEqual(
self.parser.parse_args('0.5 1 b -w 7'.split()),
NS(foo=False, bar=0.5, w=7, x='b'),
)
self.assertEqual(
self.parser.parse_args('0.25 --foo 2 -y 2 3j -- -1j'.split()),
NS(foo=True, bar=0.25, y='2', z=[3j, -1j]),
)
self.assertEqual(
self.parser.parse_args('--foo 0.125 1 c'.split()),
NS(foo=True, bar=0.125, w=None, x='c'),
)
def test_parse_known_args(self):
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), []),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -p 1 b -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -w 7 -p'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-p']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 1 b -q -rs -w 7'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-q', '-rs']),
)
self.assertEqual(
self.parser.parse_known_args('0.5 -W 1 b -X Y -w 7 Z'.split()),
(NS(foo=False, bar=0.5, w=7, x='b'), ['-W', '-X', 'Y', 'Z']),
)
def test_dest(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('--foo', action='store_true')
subparsers = parser.add_subparsers(dest='bar')
parser1 = subparsers.add_parser('1')
parser1.add_argument('baz')
self.assertEqual(NS(foo=False, bar='1', baz='2'),
parser.parse_args('1 2'.split()))
def test_help(self):
self.assertEqual(self.parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
optional arguments:
-h, --help show this help message and exit
--foo foo help
'''))
def test_help_extra_prefix_chars(self):
# Make sure - is still used for help if it is a non-first prefix char
parser = self._get_parser(prefix_chars='+:-')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [++foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [++foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
optional arguments:
-h, --help show this help message and exit
++foo foo help
'''))
def test_help_alternate_prefix_chars(self):
parser = self._get_parser(prefix_chars='+:/')
self.assertEqual(parser.format_usage(),
'usage: PROG [+h] [++foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [+h] [++foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
optional arguments:
+h, ++help show this help message and exit
++foo foo help
'''))
def test_parser_command_help(self):
self.assertEqual(self.command_help_parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(self.command_help_parser.format_help(),
textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
{1,2} command help
1 1 help
2 2 help
optional arguments:
-h, --help show this help message and exit
--foo foo help
'''))
def test_subparser_title_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG',
description='main description')
parser.add_argument('--foo', action='store_true', help='foo help')
parser.add_argument('bar', help='bar help')
subparsers = parser.add_subparsers(title='subcommands',
description='command help',
help='additional text')
parser1 = subparsers.add_parser('1')
parser2 = subparsers.add_parser('2')
self.assertEqual(parser.format_usage(),
'usage: PROG [-h] [--foo] bar {1,2} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [--foo] bar {1,2} ...
main description
positional arguments:
bar bar help
optional arguments:
-h, --help show this help message and exit
--foo foo help
subcommands:
command help
{1,2} additional text
'''))
def _test_subparser_help(self, args_str, expected_help):
try:
self.parser.parse_args(args_str.split())
except ArgumentParserError:
err = sys.exc_info()[1]
if err.stdout != expected_help:
print(repr(expected_help))
print(repr(err.stdout))
self.assertEqual(err.stdout, expected_help)
def test_subparser1_help(self):
self._test_subparser_help('5.0 1 -h', textwrap.dedent('''\
usage: PROG bar 1 [-h] [-w W] {a,b,c}
1 description
positional arguments:
{a,b,c} x help
optional arguments:
-h, --help show this help message and exit
-w W w help
'''))
def test_subparser2_help(self):
self._test_subparser_help('5.0 2 -h', textwrap.dedent('''\
usage: PROG bar 2 [-h] [-y {1,2,3}] [z [z ...]]
2 description
positional arguments:
z z help
optional arguments:
-h, --help show this help message and exit
-y {1,2,3} y help
'''))
# ============
# Groups tests
# ============
class TestPositionalsGroups(TestCase):
"""Tests that order of group positionals matches construction order"""
def test_nongroup_first(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('foo')
group = parser.add_argument_group('g')
group.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_group_first(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
group.add_argument('foo')
parser.add_argument('bar')
parser.add_argument('baz')
expected = NS(foo='1', bar='2', baz='3')
result = parser.parse_args('1 2 3'.split())
self.assertEqual(expected, result)
def test_interleaved_groups(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_argument_group('xxx')
parser.add_argument('foo')
group.add_argument('bar')
parser.add_argument('baz')
group = parser.add_argument_group('yyy')
group.add_argument('frell')
expected = NS(foo='1', bar='2', baz='3', frell='4')
result = parser.parse_args('1 2 3 4'.split())
self.assertEqual(expected, result)
# ===================
# Parent parser tests
# ===================
class TestParentParsers(TestCase):
"""Tests that parsers can be created with parent parsers"""
def assertArgumentParserError(self, *args, **kwargs):
self.assertRaises(ArgumentParserError, *args, **kwargs)
def setUp(self):
super(TestParentParsers, self).setUp()
self.wxyz_parent = ErrorRaisingArgumentParser(add_help=False)
self.wxyz_parent.add_argument('--w')
x_group = self.wxyz_parent.add_argument_group('x')
x_group.add_argument('-y')
self.wxyz_parent.add_argument('z')
self.abcd_parent = ErrorRaisingArgumentParser(add_help=False)
self.abcd_parent.add_argument('a')
self.abcd_parent.add_argument('-b')
c_group = self.abcd_parent.add_argument_group('c')
c_group.add_argument('--d')
self.w_parent = ErrorRaisingArgumentParser(add_help=False)
self.w_parent.add_argument('--w')
self.z_parent = ErrorRaisingArgumentParser(add_help=False)
self.z_parent.add_argument('z')
# parents with mutually exclusive groups
self.ab_mutex_parent = ErrorRaisingArgumentParser(add_help=False)
group = self.ab_mutex_parent.add_mutually_exclusive_group()
group.add_argument('-a', action='store_true')
group.add_argument('-b', action='store_true')
self.main_program = os.path.basename(sys.argv[0])
def test_single_parent(self):
parser = ErrorRaisingArgumentParser(parents=[self.wxyz_parent])
self.assertEqual(parser.parse_args('-y 1 2 --w 3'.split()),
NS(w='3', y='1', z='2'))
def test_single_parent_mutex(self):
self._test_mutex_ab(self.ab_mutex_parent.parse_args)
parser = ErrorRaisingArgumentParser(parents=[self.ab_mutex_parent])
self._test_mutex_ab(parser.parse_args)
    def test_single_grandparent_mutex(self):
parents = [self.ab_mutex_parent]
parser = ErrorRaisingArgumentParser(add_help=False, parents=parents)
parser = ErrorRaisingArgumentParser(parents=[parser])
self._test_mutex_ab(parser.parse_args)
def _test_mutex_ab(self, parse_args):
self.assertEqual(parse_args([]), NS(a=False, b=False))
self.assertEqual(parse_args(['-a']), NS(a=True, b=False))
self.assertEqual(parse_args(['-b']), NS(a=False, b=True))
self.assertArgumentParserError(parse_args, ['-a', '-b'])
self.assertArgumentParserError(parse_args, ['-b', '-a'])
self.assertArgumentParserError(parse_args, ['-c'])
self.assertArgumentParserError(parse_args, ['-a', '-c'])
self.assertArgumentParserError(parse_args, ['-b', '-c'])
def test_multiple_parents(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('--d 1 --w 2 3 4'.split()),
NS(a='3', b=None, d='1', w='2', y=None, z='4'))
def test_multiple_parents_mutex(self):
parents = [self.ab_mutex_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('-a --w 2 3'.split()),
NS(a=True, b=False, w='2', y=None, z='3'))
self.assertArgumentParserError(
parser.parse_args, '-a --w 2 3 -b'.split())
self.assertArgumentParserError(
parser.parse_args, '-a -b --w 2 3'.split())
def test_conflicting_parents(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.w_parent, self.wxyz_parent])
def test_conflicting_parents_mutex(self):
self.assertRaises(
argparse.ArgumentError,
argparse.ArgumentParser,
parents=[self.abcd_parent, self.ab_mutex_parent])
def test_same_argument_name_parents(self):
parents = [self.wxyz_parent, self.z_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
self.assertEqual(parser.parse_args('1 2'.split()),
NS(w=None, y=None, z='2'))
def test_subparser_parents(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
abcde_parser = subparsers.add_parser('bar', parents=[self.abcd_parent])
abcde_parser.add_argument('e')
self.assertEqual(parser.parse_args('bar -b 1 --d 2 3 4'.split()),
NS(a='3', b='1', d='2', e='4'))
def test_subparser_parents_mutex(self):
parser = ErrorRaisingArgumentParser()
subparsers = parser.add_subparsers()
parents = [self.ab_mutex_parent]
abc_parser = subparsers.add_parser('foo', parents=parents)
c_group = abc_parser.add_argument_group('c_group')
c_group.add_argument('c')
parents = [self.wxyz_parent, self.ab_mutex_parent]
wxyzabe_parser = subparsers.add_parser('bar', parents=parents)
wxyzabe_parser.add_argument('e')
self.assertEqual(parser.parse_args('foo -a 4'.split()),
NS(a=True, b=False, c='4'))
self.assertEqual(parser.parse_args('bar -b --w 2 3 4'.split()),
NS(a=False, b=True, w='2', y=None, z='3', e='4'))
self.assertArgumentParserError(
parser.parse_args, 'foo -a -b 4'.split())
self.assertArgumentParserError(
parser.parse_args, 'bar -b -a 4'.split())
def test_parent_help(self):
parents = [self.abcd_parent, self.wxyz_parent]
parser = ErrorRaisingArgumentParser(parents=parents)
parser_help = parser.format_help()
progname = self.main_program
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {}{}[-h] [-b B] [--d D] [--w W] [-y Y] a z
positional arguments:
a
z
optional arguments:
-h, --help show this help message and exit
-b B
--w W
c:
--d D
x:
-y Y
'''.format(progname, ' ' if progname else '' )))
def test_groups_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
g = parent.add_argument_group(title='g', description='gd')
g.add_argument('-w')
g.add_argument('-x')
m = parent.add_mutually_exclusive_group()
m.add_argument('-y')
m.add_argument('-z')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertRaises(ArgumentParserError, parser.parse_args,
['-y', 'Y', '-z', 'Z'])
parser_help = parser.format_help()
progname = self.main_program
self.assertEqual(parser_help, textwrap.dedent('''\
usage: {}{}[-h] [-w W] [-x X] [-y Y | -z Z]
optional arguments:
-h, --help show this help message and exit
-y Y
-z Z
g:
gd
-w W
-x X
'''.format(progname, ' ' if progname else '' )))
# ==============================
# Mutually exclusive group tests
# ==============================
class TestMutuallyExclusiveGroupErrors(TestCase):
def test_invalid_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
raises = self.assertRaises
raises(TypeError, parser.add_mutually_exclusive_group, title='foo')
def test_invalid_add_argument(self):
parser = ErrorRaisingArgumentParser()
group = parser.add_mutually_exclusive_group()
add_argument = group.add_argument
raises = self.assertRaises
raises(ValueError, add_argument, '--foo', required=True)
raises(ValueError, add_argument, 'bar')
raises(ValueError, add_argument, 'bar', nargs='+')
raises(ValueError, add_argument, 'bar', nargs=1)
raises(ValueError, add_argument, 'bar', nargs=argparse.PARSER)
def test_help(self):
parser = ErrorRaisingArgumentParser(prog='PROG')
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('--foo', action='store_true')
group1.add_argument('--bar', action='store_false')
group2 = parser.add_mutually_exclusive_group()
group2.add_argument('--soup', action='store_true')
group2.add_argument('--nuts', action='store_false')
expected = '''\
usage: PROG [-h] [--foo | --bar] [--soup | --nuts]
optional arguments:
-h, --help show this help message and exit
--foo
--bar
--soup
--nuts
'''
self.assertEqual(parser.format_help(), textwrap.dedent(expected))
class MEMixin(object):
def test_failures_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
error = ArgumentParserError
for args_string in self.failures:
self.assertRaises(error, parse_args, args_string.split())
def test_failures_when_required(self):
parse_args = self.get_parser(required=True).parse_args
error = ArgumentParserError
for args_string in self.failures + ['']:
self.assertRaises(error, parse_args, args_string.split())
def test_successes_when_not_required(self):
parse_args = self.get_parser(required=False).parse_args
successes = self.successes + self.successes_when_not_required
for args_string, expected_ns in successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_successes_when_required(self):
parse_args = self.get_parser(required=True).parse_args
for args_string, expected_ns in self.successes:
actual_ns = parse_args(args_string.split())
self.assertEqual(actual_ns, expected_ns)
def test_usage_when_not_required(self):
format_usage = self.get_parser(required=False).format_usage
expected_usage = self.usage_when_not_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_usage_when_required(self):
format_usage = self.get_parser(required=True).format_usage
expected_usage = self.usage_when_required
self.assertEqual(format_usage(), textwrap.dedent(expected_usage))
def test_help_when_not_required(self):
format_help = self.get_parser(required=False).format_help
help = self.usage_when_not_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
def test_help_when_required(self):
format_help = self.get_parser(required=True).format_help
help = self.usage_when_required + self.help
self.assertEqual(format_help(), textwrap.dedent(help))
class TestMutuallyExclusiveSimple(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--bar', help='bar help')
group.add_argument('--baz', nargs='?', const='Z', help='baz help')
return parser
failures = ['--bar X --baz Y', '--bar X --baz']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--bar X --bar Z', NS(bar='Z', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
('--baz', NS(bar=None, baz='Z')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz [BAZ]]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz [BAZ])
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
--bar BAR bar help
--baz [BAZ] baz help
'''
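# In the usage line, a mutually exclusive group is rendered in square
# brackets when not required and in parentheses when required, with its
# members separated by '|', as the two usage strings above show.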
class TestMutuallyExclusiveLong(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('--abcde', help='abcde help')
parser.add_argument('--fghij', help='fghij help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--klmno', help='klmno help')
group.add_argument('--pqrst', help='pqrst help')
return parser
failures = ['--klmno X --pqrst Y']
successes = [
('--klmno X', NS(abcde=None, fghij=None, klmno='X', pqrst=None)),
('--abcde Y --klmno X',
NS(abcde='Y', fghij=None, klmno='X', pqrst=None)),
('--pqrst X', NS(abcde=None, fghij=None, klmno=None, pqrst='X')),
('--pqrst X --fghij Y',
NS(abcde=None, fghij='Y', klmno=None, pqrst='X')),
]
successes_when_not_required = [
('', NS(abcde=None, fghij=None, klmno=None, pqrst=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
[--klmno KLMNO | --pqrst PQRST]
'''
usage_when_required = '''\
usage: PROG [-h] [--abcde ABCDE] [--fghij FGHIJ]
(--klmno KLMNO | --pqrst PQRST)
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
--abcde ABCDE abcde help
--fghij FGHIJ fghij help
--klmno KLMNO klmno help
--pqrst PQRST pqrst help
'''
class TestMutuallyExclusiveFirstSuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-x', help=argparse.SUPPRESS)
group.add_argument('-y', action='store_false', help='y help')
return parser
failures = ['-x X -y']
successes = [
('-x X', NS(x='X', y=True)),
('-x X -x Y', NS(x='Y', y=True)),
('-y', NS(x=None, y=False)),
]
successes_when_not_required = [
('', NS(x=None, y=True)),
]
usage_when_not_required = '''\
usage: PROG [-h] [-y]
'''
usage_when_required = '''\
usage: PROG [-h] -y
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
-y y help
'''
class TestMutuallyExclusiveManySuppressed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
add = group.add_argument
add('--spam', action='store_true', help=argparse.SUPPRESS)
add('--badger', action='store_false', help=argparse.SUPPRESS)
add('--bladder', help=argparse.SUPPRESS)
return parser
failures = [
'--spam --badger',
'--badger --bladder B',
'--bladder B --spam',
]
successes = [
('--spam', NS(spam=True, badger=True, bladder=None)),
('--badger', NS(spam=False, badger=False, bladder=None)),
('--bladder B', NS(spam=False, badger=True, bladder='B')),
('--spam --spam', NS(spam=True, badger=True, bladder=None)),
]
successes_when_not_required = [
('', NS(spam=False, badger=True, bladder=None)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h]
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
'''
class TestMutuallyExclusiveOptionalAndPositional(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('--foo', action='store_true', help='FOO')
group.add_argument('--spam', help='SPAM')
group.add_argument('badger', nargs='*', default='X', help='BADGER')
return parser
failures = [
'--foo --spam S',
'--spam S X',
'X --foo',
'X Y Z --spam S',
'--foo X Y',
]
successes = [
('--foo', NS(foo=True, spam=None, badger='X')),
('--spam S', NS(foo=False, spam='S', badger='X')),
('X', NS(foo=False, spam=None, badger=['X'])),
('X Y Z', NS(foo=False, spam=None, badger=['X', 'Y', 'Z'])),
]
successes_when_not_required = [
('', NS(foo=False, spam=None, badger='X')),
]
usage_when_not_required = '''\
usage: PROG [-h] [--foo | --spam SPAM | badger [badger ...]]
'''
usage_when_required = '''\
usage: PROG [-h] (--foo | --spam SPAM | badger [badger ...])
'''
help = '''\
positional arguments:
badger BADGER
optional arguments:
-h, --help show this help message and exit
--foo FOO
--spam SPAM SPAM
'''
class TestMutuallyExclusiveOptionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('-x', action='store_true', help='x help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('-a', action='store_true', help='a help')
group.add_argument('-b', action='store_true', help='b help')
parser.add_argument('-y', action='store_true', help='y help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['-a -b', '-b -c', '-a -c', '-a -b -c']
successes = [
('-a', NS(a=True, b=False, c=False, x=False, y=False)),
('-b', NS(a=False, b=True, c=False, x=False, y=False)),
('-c', NS(a=False, b=False, c=True, x=False, y=False)),
('-a -x', NS(a=True, b=False, c=False, x=True, y=False)),
('-y -b', NS(a=False, b=True, c=False, x=False, y=True)),
('-x -y -c', NS(a=False, b=False, c=True, x=True, y=True)),
]
successes_when_not_required = [
('', NS(a=False, b=False, c=False, x=False, y=False)),
('-x', NS(a=False, b=False, c=False, x=True, y=False)),
('-y', NS(a=False, b=False, c=False, x=False, y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-x] [-a] [-b] [-y] [-c]
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
-x x help
-a a help
-b b help
-y y help
-c c help
'''
class TestMutuallyExclusiveInGroup(MEMixin, TestCase):
def get_parser(self, required=None):
parser = ErrorRaisingArgumentParser(prog='PROG')
titled_group = parser.add_argument_group(
title='Titled group', description='Group description')
mutex_group = \
titled_group.add_mutually_exclusive_group(required=required)
mutex_group.add_argument('--bar', help='bar help')
mutex_group.add_argument('--baz', help='baz help')
return parser
failures = ['--bar X --baz Y', '--baz X --bar Y']
successes = [
('--bar X', NS(bar='X', baz=None)),
('--baz Y', NS(bar=None, baz='Y')),
]
successes_when_not_required = [
('', NS(bar=None, baz=None)),
]
usage_when_not_required = '''\
usage: PROG [-h] [--bar BAR | --baz BAZ]
'''
usage_when_required = '''\
usage: PROG [-h] (--bar BAR | --baz BAZ)
'''
help = '''\
optional arguments:
-h, --help show this help message and exit
Titled group:
Group description
--bar BAR bar help
--baz BAZ baz help
'''
class TestMutuallyExclusiveOptionalsAndPositionalsMixed(MEMixin, TestCase):
def get_parser(self, required):
parser = ErrorRaisingArgumentParser(prog='PROG')
parser.add_argument('x', help='x help')
parser.add_argument('-y', action='store_true', help='y help')
group = parser.add_mutually_exclusive_group(required=required)
group.add_argument('a', nargs='?', help='a help')
group.add_argument('-b', action='store_true', help='b help')
group.add_argument('-c', action='store_true', help='c help')
return parser
failures = ['X A -b', '-b -c', '-c X A']
successes = [
('X A', NS(a='A', b=False, c=False, x='X', y=False)),
('X -b', NS(a=None, b=True, c=False, x='X', y=False)),
('X -c', NS(a=None, b=False, c=True, x='X', y=False)),
('X A -y', NS(a='A', b=False, c=False, x='X', y=True)),
('X -y -b', NS(a=None, b=True, c=False, x='X', y=True)),
]
successes_when_not_required = [
('X', NS(a=None, b=False, c=False, x='X', y=False)),
('X -y', NS(a=None, b=False, c=False, x='X', y=True)),
]
usage_when_required = usage_when_not_required = '''\
usage: PROG [-h] [-y] [-b] [-c] x [a]
'''
help = '''\
positional arguments:
x x help
a a help
optional arguments:
-h, --help show this help message and exit
-y y help
-b b help
-c c help
'''
# =================================================
# Mutually exclusive group in parent parser tests
# =================================================
class MEPBase(object):
def get_parser(self, required=None):
parent = super(MEPBase, self).get_parser(required=required)
parser = ErrorRaisingArgumentParser(
prog=parent.prog, add_help=False, parents=[parent])
return parser
class TestMutuallyExclusiveGroupErrorsParent(
MEPBase, TestMutuallyExclusiveGroupErrors):
pass
class TestMutuallyExclusiveSimpleParent(
MEPBase, TestMutuallyExclusiveSimple):
pass
class TestMutuallyExclusiveLongParent(
MEPBase, TestMutuallyExclusiveLong):
pass
class TestMutuallyExclusiveFirstSuppressedParent(
MEPBase, TestMutuallyExclusiveFirstSuppressed):
pass
class TestMutuallyExclusiveManySuppressedParent(
MEPBase, TestMutuallyExclusiveManySuppressed):
pass
class TestMutuallyExclusiveOptionalAndPositionalParent(
MEPBase, TestMutuallyExclusiveOptionalAndPositional):
pass
class TestMutuallyExclusiveOptionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsMixed):
pass
class TestMutuallyExclusiveOptionalsAndPositionalsMixedParent(
MEPBase, TestMutuallyExclusiveOptionalsAndPositionalsMixed):
pass
# =================
# Set default tests
# =================
class TestSetDefaults(TestCase):
def test_set_defaults_no_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
parser.set_defaults(y='bar', z=1)
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([]))
self.assertEqual(NS(x='foo', y='bar', z=1),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar', z=1),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='baz', y='bar', z=2),
parser.parse_args([], NS(x='baz', z=2)))
def test_set_defaults_with_args(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo', y='bar')
parser.add_argument('-x', default='xfoox')
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([]))
self.assertEqual(NS(x='xfoox', y='bar'),
parser.parse_args([], NS()))
self.assertEqual(NS(x='baz', y='bar'),
parser.parse_args([], NS(x='baz')))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS()))
self.assertEqual(NS(x='1', y='bar'),
parser.parse_args('-x 1'.split(), NS(x='baz')))
def test_set_defaults_subparsers(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(x='foo')
subparsers = parser.add_subparsers()
parser_a = subparsers.add_parser('a')
parser_a.set_defaults(y='bar')
self.assertEqual(NS(x='foo', y='bar'),
parser.parse_args('a'.split()))
def test_set_defaults_parents(self):
parent = ErrorRaisingArgumentParser(add_help=False)
parent.set_defaults(x='foo')
parser = ErrorRaisingArgumentParser(parents=[parent])
self.assertEqual(NS(x='foo'), parser.parse_args([]))
def test_set_defaults_same_as_add_argument(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
parser.add_argument('-w')
parser.add_argument('-x', default='XX')
parser.add_argument('y', nargs='?')
parser.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
def test_set_defaults_same_as_add_argument_group(self):
parser = ErrorRaisingArgumentParser()
parser.set_defaults(w='W', x='X', y='Y', z='Z')
group = parser.add_argument_group('foo')
group.add_argument('-w')
group.add_argument('-x', default='XX')
group.add_argument('y', nargs='?')
group.add_argument('z', nargs='?', default='ZZ')
# defaults set previously
self.assertEqual(NS(w='W', x='XX', y='Y', z='ZZ'),
parser.parse_args([]))
# reset defaults
parser.set_defaults(w='WW', x='X', y='YY', z='Z')
self.assertEqual(NS(w='WW', x='X', y='YY', z='Z'),
parser.parse_args([]))
# =================
# Get default tests
# =================
class TestGetDefault(TestCase):
def test_get_default(self):
parser = ErrorRaisingArgumentParser()
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--foo")
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(None, parser.get_default("bar"))
parser.add_argument("--bar", type=int, default=42)
self.assertEqual(None, parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
parser.set_defaults(foo="badger")
self.assertEqual("badger", parser.get_default("foo"))
self.assertEqual(42, parser.get_default("bar"))
# ==========================
# Namespace 'contains' tests
# ==========================
class TestNamespaceContainsSimple(TestCase):
def test_empty(self):
ns = argparse.Namespace()
self.assertEqual('' in ns, False)
self.assertEqual('' not in ns, True)
self.assertEqual('x' in ns, False)
def test_non_empty(self):
ns = argparse.Namespace(x=1, y=2)
self.assertEqual('x' in ns, True)
self.assertEqual('x' not in ns, False)
self.assertEqual('y' in ns, True)
self.assertEqual('' in ns, False)
self.assertEqual('xx' in ns, False)
self.assertEqual('z' in ns, False)
# =====================
# Help formatting tests
# =====================
class TestHelpFormattingMetaclass(type):
def __init__(cls, name, bases, bodydict):
if name == 'HelpTestCase':
return
class AddTests(object):
def __init__(self, test_class, func_suffix, std_name):
self.func_suffix = func_suffix
self.std_name = std_name
for test_func in [self.test_format,
self.test_print,
self.test_print_file]:
test_name = '%s_%s' % (test_func.__name__, func_suffix)
def test_wrapper(self, test_func=test_func):
test_func(self)
try:
test_wrapper.__name__ = test_name
except TypeError:
pass
setattr(test_class, test_name, test_wrapper)
def _get_parser(self, tester):
parser = argparse.ArgumentParser(
*tester.parser_signature.args,
**tester.parser_signature.kwargs)
for argument_sig in getattr(tester, 'argument_signatures', []):
parser.add_argument(*argument_sig.args,
**argument_sig.kwargs)
group_sigs = getattr(tester, 'argument_group_signatures', [])
for group_sig, argument_sigs in group_sigs:
group = parser.add_argument_group(*group_sig.args,
**group_sig.kwargs)
for argument_sig in argument_sigs:
group.add_argument(*argument_sig.args,
**argument_sig.kwargs)
subparsers_sigs = getattr(tester, 'subparsers_signatures', [])
if subparsers_sigs:
subparsers = parser.add_subparsers()
for subparser_sig in subparsers_sigs:
subparsers.add_parser(*subparser_sig.args,
**subparser_sig.kwargs)
return parser
def _test(self, tester, parser_text):
expected_text = getattr(tester, self.func_suffix)
expected_text = textwrap.dedent(expected_text)
if expected_text != parser_text:
print(repr(expected_text))
print(repr(parser_text))
for char1, char2 in zip(expected_text, parser_text):
if char1 != char2:
print('first diff: %r %r' % (char1, char2))
break
tester.assertEqual(expected_text, parser_text)
def test_format(self, tester):
parser = self._get_parser(tester)
format = getattr(parser, 'format_%s' % self.func_suffix)
self._test(tester, format())
def test_print(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
old_stream = getattr(sys, self.std_name)
setattr(sys, self.std_name, StdIOBuffer())
try:
print_()
parser_text = getattr(sys, self.std_name).getvalue()
finally:
setattr(sys, self.std_name, old_stream)
self._test(tester, parser_text)
def test_print_file(self, tester):
parser = self._get_parser(tester)
print_ = getattr(parser, 'print_%s' % self.func_suffix)
sfile = StdIOBuffer()
print_(sfile)
parser_text = sfile.getvalue()
self._test(tester, parser_text)
# add tests for {format,print}_{usage,help,version}
for func_suffix, std_name in [('usage', 'stdout'),
('help', 'stdout'),
('version', 'stderr')]:
AddTests(cls, func_suffix, std_name)
bases = TestCase,
HelpTestCase = TestHelpFormattingMetaclass('HelpTestCase', bases, {})
class TestHelpBiggerOptionals(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG', version='0.1')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] foo bar
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerOptionalGroups(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
parser_signature = Sig(prog='PROG', description='DESCRIPTION',
epilog='EPILOG', version='0.1')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('foo', help='FOO HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = [
(Sig('GROUP TITLE', description='GROUP DESCRIPTION'), [
Sig('baz', help='BAZ HELP'),
Sig('-z', nargs='+', help='Z HELP')]),
]
usage = '''\
usage: PROG [-h] [-v] [-x] [--y Y] [-z Z [Z ...]] foo bar baz
'''
help = usage + '''\
DESCRIPTION
positional arguments:
foo FOO HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x X HELP
--y Y Y HELP
GROUP TITLE:
GROUP DESCRIPTION
baz BAZ HELP
-z Z [Z ...] Z HELP
EPILOG
'''
version = '''\
0.1
'''
class TestHelpBiggerPositionals(HelpTestCase):
"""Make sure that help aligns when arguments are longer"""
parser_signature = Sig(usage='USAGE', description='DESCRIPTION')
argument_signatures = [
Sig('-x', action='store_true', help='X HELP'),
Sig('--y', help='Y HELP'),
Sig('ekiekiekifekang', help='EKI HELP'),
Sig('bar', help='BAR HELP'),
]
argument_group_signatures = []
usage = '''\
usage: USAGE
'''
help = usage + '''\
DESCRIPTION
positional arguments:
ekiekiekifekang EKI HELP
bar BAR HELP
optional arguments:
-h, --help show this help message and exit
-x X HELP
--y Y Y HELP
'''
version = ''
class TestHelpReformatting(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(
prog='PROG',
description=' oddly formatted\n'
'description\n'
'\n'
'that is so long that it should go onto multiple '
'lines when wrapped')
argument_signatures = [
Sig('-x', metavar='XX', help='oddly\n'
' formatted -x help'),
Sig('y', metavar='yyy', help='normal y help'),
]
argument_group_signatures = [
(Sig('title', description='\n'
' oddly formatted group\n'
'\n'
'description'),
[Sig('-a', action='store_true',
help=' oddly \n'
'formatted -a help \n'
' again, so long that it should be wrapped over '
'multiple lines')]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
oddly formatted description that is so long that it should go onto \
multiple
lines when wrapped
positional arguments:
yyy normal y help
optional arguments:
-h, --help show this help message and exit
-x XX oddly formatted -x help
title:
oddly formatted group description
-a oddly formatted -a help again, so long that it should \
be wrapped
over multiple lines
'''
version = ''
class TestHelpWrappingShortNames(HelpTestCase):
"""Make sure that text after short names starts on the first line"""
parser_signature = Sig(prog='PROG', description='D\nD' * 30)
argument_signatures = [
Sig('-x', metavar='XX', help='XHH HX' * 20),
Sig('y', metavar='yyy', help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', action='store_true', help='AHHH HHA' * 10)]),
]
usage = '''\
usage: PROG [-h] [-x XX] [-a] yyy
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyy YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-x XX XHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH \
HXXHH HXXHH
HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HXXHH HX
ALPHAS:
-a AHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH HHAAHHH \
HHAAHHH
HHAAHHH HHAAHHH HHA
'''
version = ''
class TestHelpWrappingLongNames(HelpTestCase):
"""Make sure that text after long names starts on the next line"""
parser_signature = Sig(usage='USAGE', description= 'D D' * 30,
version='V V'*30)
argument_signatures = [
Sig('-x', metavar='X' * 25, help='XH XH' * 20),
Sig('y', metavar='y' * 25, help='YH YH' * 20),
]
argument_group_signatures = [
(Sig('ALPHAS'), [
Sig('-a', metavar='A' * 25, help='AH AH' * 20),
Sig('z', metavar='z' * 25, help='ZH ZH' * 20)]),
]
usage = '''\
usage: USAGE
'''
help = usage + '''\
D DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD DD \
DD DD DD
DD DD DD DD D
positional arguments:
yyyyyyyyyyyyyyyyyyyyyyyyy
YH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH \
YHYH YHYH
YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YHYH YH
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
XH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH \
XHXH XHXH
XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XHXH XH
ALPHAS:
-a AAAAAAAAAAAAAAAAAAAAAAAAA
AH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH \
AHAH AHAH
AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AHAH AH
zzzzzzzzzzzzzzzzzzzzzzzzz
ZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH \
ZHZH ZHZH
ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZHZH ZH
'''
version = '''\
V VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV VV \
VV VV VV
VV VV VV VV V
'''
class TestHelpUsage(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', nargs='+', help='w'),
Sig('-x', nargs='*', help='x'),
Sig('a', help='a'),
Sig('b', help='b', nargs=2),
Sig('c', help='c', nargs='?'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-y', nargs='?', help='y'),
Sig('-z', nargs=3, help='z'),
Sig('d', help='d', nargs='*'),
Sig('e', help='e', nargs='+'),
])
]
usage = '''\
usage: PROG [-h] [-w W [W ...]] [-x [X [X ...]]] [-y [Y]] [-z Z Z Z]
a b b [c] [d [d ...]] e [e ...]
'''
help = usage + '''\
positional arguments:
a a
b b
c c
optional arguments:
-h, --help show this help message and exit
-w W [W ...] w
-x [X [X ...]] x
group:
-y [Y] y
-z Z Z Z z
d d
e e
'''
version = ''
class TestHelpOnlyUserGroups(HelpTestCase):
"""Test basic usage messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = [
(Sig('xxxx'), [
Sig('-x', help='x'),
Sig('a', help='a'),
]),
(Sig('yyyy'), [
Sig('b', help='b'),
Sig('-y', help='y'),
]),
]
usage = '''\
usage: PROG [-x X] [-y Y] a b
'''
help = usage + '''\
xxxx:
-x X x
a a
yyyy:
b b
-y Y y
'''
version = ''
class TestHelpUsageLongProg(HelpTestCase):
"""Test usage messages where the prog is long"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W'),
Sig('-x', metavar='X'),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w W] [-x X] a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w W
-x X
'''
version = ''
class TestHelpUsageLongProgOptionsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the optionals wrap"""
parser_signature = Sig(prog='P' * 60)
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
[-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] [-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b
'''
help = usage + '''\
positional arguments:
a
b
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageLongProgPositionalsWrap(HelpTestCase):
"""Test usage messages where the prog is long and the positionals wrap"""
parser_signature = Sig(prog='P' * 60, add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpUsageOptionalsWrap(HelpTestCase):
"""Test usage messages where the optionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', metavar='W' * 25),
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a'),
Sig('b'),
Sig('c'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w WWWWWWWWWWWWWWWWWWWWWWWWW] \
[-x XXXXXXXXXXXXXXXXXXXXXXXXX]
[-y YYYYYYYYYYYYYYYYYYYYYYYYY] \
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
a b c
'''
help = usage + '''\
positional arguments:
a
b
c
optional arguments:
-h, --help show this help message and exit
-w WWWWWWWWWWWWWWWWWWWWWWWWW
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsWrap(HelpTestCase):
"""Test usage messages where the positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x'),
Sig('-y'),
Sig('-z'),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x X] [-y Y] [-z Z]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x X
-y Y
-z Z
'''
version = ''
class TestHelpUsageOptionalsPositionalsWrap(HelpTestCase):
"""Test usage messages where the optionals and positionals wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsageOptionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only optionals and they wrap"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', metavar='X' * 25),
Sig('-y', metavar='Y' * 25),
Sig('-z', metavar='Z' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-x XXXXXXXXXXXXXXXXXXXXXXXXX] \
[-y YYYYYYYYYYYYYYYYYYYYYYYYY]
[-z ZZZZZZZZZZZZZZZZZZZZZZZZZ]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-x XXXXXXXXXXXXXXXXXXXXXXXXX
-y YYYYYYYYYYYYYYYYYYYYYYYYY
-z ZZZZZZZZZZZZZZZZZZZZZZZZZ
'''
version = ''
class TestHelpUsagePositionalsOnlyWrap(HelpTestCase):
"""Test usage messages where there are only positionals and they wrap"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('a' * 25),
Sig('b' * 25),
Sig('c' * 25),
]
argument_group_signatures = []
usage = '''\
usage: PROG aaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
help = usage + '''\
positional arguments:
aaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbb
ccccccccccccccccccccccccc
'''
version = ''
class TestHelpVariableExpansion(HelpTestCase):
"""Test that variables are expanded properly in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-x', type=int,
help='x %(prog)s %(default)s %(type)s %%'),
Sig('-y', action='store_const', default=42, const='XXX',
help='y %(prog)s %(default)s %(const)s'),
Sig('--foo', choices='abc',
help='foo %(prog)s %(default)s %(choices)s'),
Sig('--bar', default='baz', choices=[1, 2], metavar='BBB',
help='bar %(prog)s %(default)s %(dest)s'),
Sig('spam', help='spam %(prog)s %(default)s'),
Sig('badger', default=0.5, help='badger %(prog)s %(default)s'),
]
argument_group_signatures = [
(Sig('group'), [
Sig('-a', help='a %(prog)s %(default)s'),
Sig('-b', default=-1, help='b %(prog)s %(default)s'),
])
]
usage = ('''\
usage: PROG [-h] [-x X] [-y] [--foo {a,b,c}] [--bar BBB] [-a A] [-b B]
spam badger
''')
help = usage + '''\
positional arguments:
spam spam PROG None
badger badger PROG 0.5
optional arguments:
-h, --help show this help message and exit
-x X x PROG None int %
-y y PROG 42 XXX
--foo {a,b,c} foo PROG None a, b, c
--bar BBB bar PROG baz bar
group:
-a A a PROG None
-b B b PROG -1
'''
version = ''
class TestHelpVariableExpansionUsageSupplied(HelpTestCase):
"""Test that variables are expanded properly when usage= is present"""
parser_signature = Sig(prog='PROG', usage='%(prog)s FOO')
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG FOO
''')
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
'''
version = ''
class TestHelpVariableExpansionNoArguments(HelpTestCase):
"""Test that variables are expanded properly with no arguments"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = []
argument_group_signatures = []
usage = ('''\
usage: PROG
''')
help = usage
version = ''
class TestHelpSuppressUsage(HelpTestCase):
"""Test that items can be suppressed in usage messages"""
parser_signature = Sig(prog='PROG', usage=argparse.SUPPRESS)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
help = '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
usage = ''
version = ''
class TestHelpSuppressOptional(HelpTestCase):
"""Test that optional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help=argparse.SUPPRESS),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG spam
'''
help = usage + '''\
positional arguments:
spam spam help
'''
version = ''
class TestHelpSuppressOptionalGroup(HelpTestCase):
"""Test that optional groups can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('group'), [Sig('--bar', help=argparse.SUPPRESS)]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpSuppressPositional(HelpTestCase):
"""Test that positional arguments can be suppressed in help messages"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help=argparse.SUPPRESS),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpRequiredOptional(HelpTestCase):
"""Test that required options don't look optional"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo', required=True, help='foo help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] --foo FOO
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
'''
version = ''
class TestHelpAlternatePrefixChars(HelpTestCase):
"""Test that options display with different prefix characters"""
parser_signature = Sig(prog='PROG', prefix_chars='^;', add_help=False)
argument_signatures = [
Sig('^^foo', action='store_true', help='foo help'),
Sig(';b', ';;bar', help='bar help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [^^foo] [;b BAR]
'''
help = usage + '''\
optional arguments:
^^foo foo help
;b BAR, ;;bar BAR bar help
'''
version = ''
class TestHelpNoHelpOptional(HelpTestCase):
"""Test that the --help argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', add_help=False)
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
--foo FOO foo help
'''
version = ''
class TestHelpVersionOptional(HelpTestCase):
"""Test that the --version argument can be suppressed help messages"""
parser_signature = Sig(prog='PROG', version='1.0')
argument_signatures = [
Sig('--foo', help='foo help'),
Sig('spam', help='spam help'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-v] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
--foo FOO foo help
'''
version = '''\
1.0
'''
class TestHelpNone(HelpTestCase):
"""Test that no errors occur if no help is specified"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('--foo'),
Sig('spam'),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [--foo FOO] spam
'''
help = usage + '''\
positional arguments:
spam
optional arguments:
-h, --help show this help message and exit
--foo FOO
'''
version = ''
class TestHelpTupleMetavar(HelpTestCase):
"""Test specifying metavar as a tuple"""
parser_signature = Sig(prog='PROG')
argument_signatures = [
Sig('-w', help='w', nargs='+', metavar=('W1', 'W2')),
Sig('-x', help='x', nargs='*', metavar=('X1', 'X2')),
Sig('-y', help='y', nargs=3, metavar=('Y1', 'Y2', 'Y3')),
Sig('-z', help='z', nargs='?', metavar=('Z1', )),
]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-w W1 [W2 ...]] [-x [X1 [X2 ...]]] [-y Y1 Y2 Y3] \
[-z [Z1]]
'''
help = usage + '''\
optional arguments:
-h, --help show this help message and exit
-w W1 [W2 ...] w
-x [X1 [X2 ...]] x
-y Y1 Y2 Y3 y
-z [Z1] z
'''
version = ''
class TestHelpRawText(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawTextHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should also\n'
'appear as given here'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should also
appear as given here
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpRawDescription(HelpTestCase):
"""Test the RawTextHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,
description='Keep the formatting\n'
' exactly as it is written\n'
'\n'
'here\n')
argument_signatures = [
Sig('--foo', help=' foo help should not\n'
' retain this odd formatting'),
Sig('spam', help='spam help'),
]
argument_group_signatures = [
(Sig('title', description=' This text\n'
' should be indented\n'
' exactly like it is here\n'),
[Sig('--bar', help='bar help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar BAR] spam
'''
help = usage + '''\
Keep the formatting
exactly as it is written
here
positional arguments:
spam spam help
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help should not retain this odd formatting
title:
This text
should be indented
exactly like it is here
--bar BAR bar help
'''
version = ''
class TestHelpArgumentDefaults(HelpTestCase):
"""Test the ArgumentDefaultsHelpFormatter"""
parser_signature = Sig(
prog='PROG', formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='description')
argument_signatures = [
Sig('--foo', help='foo help - oh and by the way, %(default)s'),
Sig('--bar', action='store_true', help='bar help'),
Sig('spam', help='spam help'),
Sig('badger', nargs='?', default='wooden', help='badger help'),
]
argument_group_signatures = [
(Sig('title', description='description'),
[Sig('--baz', type=int, default=42, help='baz help')]),
]
usage = '''\
usage: PROG [-h] [--foo FOO] [--bar] [--baz BAZ] spam [badger]
'''
help = usage + '''\
description
positional arguments:
spam spam help
badger badger help (default: wooden)
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help - oh and by the way, None
--bar bar help (default: False)
title:
description
--baz BAZ baz help (default: 42)
'''
version = ''
class TestHelpVersionAction(HelpTestCase):
"""Test the default help for the version action"""
parser_signature = Sig(prog='PROG', description='description')
argument_signatures = [Sig('-V', '--version', action='version', version='3.6')]
argument_group_signatures = []
usage = '''\
usage: PROG [-h] [-V]
'''
help = usage + '''\
description
optional arguments:
-h, --help show this help message and exit
-V, --version show program's version number and exit
'''
version = ''
class TestHelpSubparsersOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands',
version='0.1')
subparsers_signatures = [Sig(name=name)
for name in ('a', 'b', 'c', 'd', 'e')]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
class TestHelpSubparsersWithHelpOrdering(HelpTestCase):
"""Test ordering of subcommands in help matches the code"""
parser_signature = Sig(prog='PROG',
description='display some subcommands',
version='0.1')
subcommand_data = (('a', 'a subcommand help'),
('b', 'b subcommand help'),
('c', 'c subcommand help'),
('d', 'd subcommand help'),
('e', 'e subcommand help'),
)
subparsers_signatures = [Sig(name=name, help=help)
for name, help in subcommand_data]
usage = '''\
usage: PROG [-h] [-v] {a,b,c,d,e} ...
'''
help = usage + '''\
display some subcommands
positional arguments:
{a,b,c,d,e}
a a subcommand help
b b subcommand help
c c subcommand help
d d subcommand help
e e subcommand help
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
'''
version = '''\
0.1
'''
# =====================================
# Optional/Positional constructor tests
# =====================================
class TestInvalidArgumentConstructors(TestCase):
"""Test a bunch of invalid Argument constructors"""
def assertTypeError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(TypeError, parser.add_argument,
*args, **kwargs)
def assertValueError(self, *args, **kwargs):
parser = argparse.ArgumentParser()
self.assertRaises(ValueError, parser.add_argument,
*args, **kwargs)
def test_invalid_keyword_arguments(self):
self.assertTypeError('-x', bar=None)
self.assertTypeError('-y', callback='foo')
self.assertTypeError('-y', callback_args=())
self.assertTypeError('-y', callback_kwargs={})
def test_missing_destination(self):
self.assertTypeError()
for action in ['append', 'store']:
self.assertTypeError(action=action)
def test_invalid_option_strings(self):
self.assertValueError('--')
self.assertValueError('---')
def test_invalid_type(self):
self.assertValueError('--foo', type='int')
self.assertValueError('--foo', type=(int, float))
def test_invalid_action(self):
self.assertValueError('-x', action='foo')
self.assertValueError('foo', action='baz')
self.assertValueError('--foo', action=('store', 'append'))
parser = argparse.ArgumentParser()
try:
parser.add_argument("--foo", action="store-true")
except ValueError:
e = sys.exc_info()[1]
expected = 'unknown action'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_multiple_dest(self):
parser = argparse.ArgumentParser()
parser.add_argument(dest='foo')
try:
parser.add_argument('bar', dest='baz')
except ValueError:
e = sys.exc_info()[1]
expected = 'dest supplied twice for positional argument'
msg = 'expected %r, found %r' % (expected, e)
self.assertTrue(expected in str(e), msg)
def test_no_argument_actions(self):
for action in ['store_const', 'store_true', 'store_false',
'append_const', 'count']:
for attrs in [dict(type=int), dict(nargs='+'),
dict(choices='ab')]:
self.assertTypeError('-x', action=action, **attrs)
def test_no_argument_no_const_actions(self):
# options with zero arguments
for action in ['store_true', 'store_false', 'count']:
# const is always disallowed
self.assertTypeError('-x', const='foo', action=action)
# nargs is always disallowed
self.assertTypeError('-x', nargs='*', action=action)
def test_more_than_one_argument_actions(self):
for action in ['store', 'append']:
# nargs=0 is disallowed
self.assertValueError('-x', nargs=0, action=action)
self.assertValueError('spam', nargs=0, action=action)
# const is disallowed with non-optional arguments
for nargs in [1, '*', '+']:
self.assertValueError('-x', const='foo',
nargs=nargs, action=action)
self.assertValueError('spam', const='foo',
nargs=nargs, action=action)
def test_required_const_actions(self):
for action in ['store_const', 'append_const']:
# nargs is always disallowed
self.assertTypeError('-x', nargs='+', action=action)
def test_parsers_action_missing_params(self):
self.assertTypeError('command', action='parsers')
self.assertTypeError('command', action='parsers', prog='PROG')
self.assertTypeError('command', action='parsers',
parser_class=argparse.ArgumentParser)
def test_required_positional(self):
self.assertTypeError('foo', required=True)
def test_user_defined_action(self):
class Success(Exception):
pass
class Action(object):
def __init__(self,
option_strings,
dest,
const,
default,
required=False):
if dest == 'spam':
if const is Success:
if default is Success:
raise Success()
def __call__(self, *args, **kwargs):
pass
parser = argparse.ArgumentParser()
self.assertRaises(Success, parser.add_argument, '--spam',
action=Action, default=Success, const=Success)
self.assertRaises(Success, parser.add_argument, 'spam',
action=Action, default=Success, const=Success)
# ================================
# Actions returned by add_argument
# ================================
class TestActionsReturned(TestCase):
def test_dest(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo')
self.assertEqual(action.dest, 'foo')
action = parser.add_argument('-b', '--bar')
self.assertEqual(action.dest, 'bar')
action = parser.add_argument('-x', '-y')
self.assertEqual(action.dest, 'x')
def test_misc(self):
parser = argparse.ArgumentParser()
action = parser.add_argument('--foo', nargs='?', const=42,
default=84, type=int, choices=[1, 2],
help='FOO', metavar='BAR', dest='baz')
self.assertEqual(action.nargs, '?')
self.assertEqual(action.const, 42)
self.assertEqual(action.default, 84)
self.assertEqual(action.type, int)
self.assertEqual(action.choices, [1, 2])
self.assertEqual(action.help, 'FOO')
self.assertEqual(action.metavar, 'BAR')
self.assertEqual(action.dest, 'baz')
# ================================
# Argument conflict handling tests
# ================================
class TestConflictHandling(TestCase):
def test_bad_type(self):
self.assertRaises(ValueError, argparse.ArgumentParser,
conflict_handler='foo')
def test_conflict_error(self):
parser = argparse.ArgumentParser()
parser.add_argument('-x')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '-x')
parser.add_argument('--spam')
self.assertRaises(argparse.ArgumentError,
parser.add_argument, '--spam')
def test_resolve_error(self):
get_parser = argparse.ArgumentParser
parser = get_parser(prog='PROG', conflict_handler='resolve')
parser.add_argument('-x', help='OLD X')
parser.add_argument('-x', help='NEW X')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
'''))
parser.add_argument('--spam', metavar='OLD_SPAM')
parser.add_argument('--spam', metavar='NEW_SPAM')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
usage: PROG [-h] [-x X] [--spam NEW_SPAM]
optional arguments:
-h, --help show this help message and exit
-x X NEW X
--spam NEW_SPAM
'''))
# =============================
# Help and Version option tests
# =============================
class TestOptionalsHelpVersionActions(TestCase):
"""Test the help and version actions"""
def _get_error(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except ArgumentParserError:
return sys.exc_info()[1]
else:
self.assertRaises(ArgumentParserError, func, *args, **kwargs)
def assertPrintHelpExit(self, parser, args_str):
self.assertEqual(
parser.format_help(),
self._get_error(parser.parse_args, args_str.split()).stdout)
def assertPrintVersionExit(self, parser, args_str):
self.assertEqual(
parser.format_version(),
self._get_error(parser.parse_args, args_str.split()).stderr)
def assertArgumentParserError(self, parser, *args):
self.assertRaises(ArgumentParserError, parser.parse_args, args)
def test_version(self):
parser = ErrorRaisingArgumentParser(version='1.0')
self.assertPrintHelpExit(parser, '-h')
self.assertPrintHelpExit(parser, '--help')
self.assertPrintVersionExit(parser, '-v')
self.assertPrintVersionExit(parser, '--version')
def test_version_format(self):
parser = ErrorRaisingArgumentParser(prog='PPP', version='%(prog)s 3.5')
msg = self._get_error(parser.parse_args, ['-v']).stderr
self.assertEqual('PPP 3.5\n', msg)
def test_version_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False, version='1.0')
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertPrintVersionExit(parser, '-v')
self.assertPrintVersionExit(parser, '--version')
def test_version_action(self):
parser = ErrorRaisingArgumentParser(prog='XXX')
parser.add_argument('-V', action='version', version='%(prog)s 3.7')
msg = self._get_error(parser.parse_args, ['-V']).stderr
self.assertEqual('XXX 3.7\n', msg)
def test_no_help(self):
parser = ErrorRaisingArgumentParser(add_help=False)
self.assertArgumentParserError(parser, '-h')
self.assertArgumentParserError(parser, '--help')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_alternate_help_version(self):
parser = ErrorRaisingArgumentParser()
parser.add_argument('-x', action='help')
parser.add_argument('-y', action='version')
self.assertPrintHelpExit(parser, '-x')
self.assertPrintVersionExit(parser, '-y')
self.assertArgumentParserError(parser, '-v')
self.assertArgumentParserError(parser, '--version')
def test_help_version_extra_arguments(self):
parser = ErrorRaisingArgumentParser(version='1.0')
parser.add_argument('-x', action='store_true')
parser.add_argument('y')
# try all combinations of valid prefixes and suffixes
valid_prefixes = ['', '-x', 'foo', '-x bar', 'baz -x']
valid_suffixes = valid_prefixes + ['--bad-option', 'foo bar baz']
for prefix in valid_prefixes:
for suffix in valid_suffixes:
format = '%s %%s %s' % (prefix, suffix)
self.assertPrintHelpExit(parser, format % '-h')
self.assertPrintHelpExit(parser, format % '--help')
self.assertPrintVersionExit(parser, format % '-v')
self.assertPrintVersionExit(parser, format % '--version')
# ======================
# str() and repr() tests
# ======================
class TestStrings(TestCase):
"""Test str() and repr() on Optionals and Positionals"""
def assertStringEqual(self, obj, result_string):
for func in [str, repr]:
self.assertEqual(func(obj), result_string)
def test_optional(self):
option = argparse.Action(
option_strings=['--foo', '-a', '-b'],
dest='b',
type='int',
nargs='+',
default=42,
choices=[1, 2, 3],
help='HELP',
metavar='METAVAR')
string = (
"Action(option_strings=['--foo', '-a', '-b'], dest='b', "
"nargs='+', const=None, default=42, type='int', "
"choices=[1, 2, 3], help='HELP', metavar='METAVAR')")
self.assertStringEqual(option, string)
def test_argument(self):
argument = argparse.Action(
option_strings=[],
dest='x',
type=float,
nargs='?',
default=2.5,
choices=[0.5, 1.5, 2.5],
help='H HH H',
metavar='MV MV MV')
string = (
"Action(option_strings=[], dest='x', nargs='?', "
"const=None, default=2.5, type=%r, choices=[0.5, 1.5, 2.5], "
"help='H HH H', metavar='MV MV MV')" % float)
self.assertStringEqual(argument, string)
def test_namespace(self):
ns = argparse.Namespace(foo=42, bar='spam')
string = "Namespace(bar='spam', foo=42)"
self.assertStringEqual(ns, string)
def test_parser(self):
parser = argparse.ArgumentParser(prog='PROG')
string = (
"ArgumentParser(prog='PROG', usage=None, description=None, "
"version=None, formatter_class=%r, conflict_handler='error', "
"add_help=True)" % argparse.HelpFormatter)
self.assertStringEqual(parser, string)
# ===============
# Namespace tests
# ===============
class TestNamespace(TestCase):
def test_constructor(self):
ns = argparse.Namespace()
self.assertRaises(AttributeError, getattr, ns, 'x')
ns = argparse.Namespace(a=42, b='spam')
self.assertEqual(ns.a, 42)
self.assertEqual(ns.b, 'spam')
def test_equality(self):
ns1 = argparse.Namespace(a=1, b=2)
ns2 = argparse.Namespace(b=2, a=1)
ns3 = argparse.Namespace(a=1)
ns4 = argparse.Namespace(b=2)
self.assertEqual(ns1, ns2)
self.assertNotEqual(ns1, ns3)
self.assertNotEqual(ns1, ns4)
self.assertNotEqual(ns2, ns3)
self.assertNotEqual(ns2, ns4)
self.assertTrue(ns1 != ns3)
self.assertTrue(ns1 != ns4)
self.assertTrue(ns2 != ns3)
self.assertTrue(ns2 != ns4)
# ===================
# File encoding tests
# ===================
class TestEncoding(TestCase):
def _test_module_encoding(self, path):
path, _ = os.path.splitext(path)
path += ".py"
with codecs.open(path, 'r', 'utf8') as f:
f.read()
def test_argparse_module_encoding(self):
self._test_module_encoding(argparse.__file__)
def test_test_argparse_module_encoding(self):
self._test_module_encoding(__file__)
# ===================
# ArgumentError tests
# ===================
class TestArgumentError(TestCase):
def test_argument_error(self):
msg = "my error here"
error = argparse.ArgumentError(None, msg)
self.assertEqual(str(error), msg)
# =======================
# ArgumentTypeError tests
# =======================
class TestArgumentTypeError(TestCase):
def test_argument_type_error(self):
def spam(string):
raise argparse.ArgumentTypeError('spam!')
parser = ErrorRaisingArgumentParser(prog='PROG', add_help=False)
parser.add_argument('x', type=spam)
try:
parser.parse_args(['XXX'])
except ArgumentParserError:
expected = 'usage: PROG x\nPROG: error: argument x: spam!\n'
msg = sys.exc_info()[1].stderr
self.assertEqual(expected, msg)
else:
self.fail()
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
def test_optionals(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo')
args, extras = parser.parse_known_args('--foo F --bar --baz'.split())
self.assertEqual(NS(foo='F'), args)
self.assertEqual(['--bar', '--baz'], extras)
def test_mixed(self):
parser = argparse.ArgumentParser()
parser.add_argument('-v', nargs='?', const=1, type=int)
parser.add_argument('--spam', action='store_false')
parser.add_argument('badger')
argv = ["B", "C", "--foo", "-v", "3", "4"]
args, extras = parser.parse_known_args(argv)
self.assertEqual(NS(v=3, spam=True, badger="B"), args)
self.assertEqual(["C", "--foo", "4"], extras)
# ==========================
# add_argument metavar tests
# ==========================
class TestAddArgumentMetavar(TestCase):
EXPECTED_MESSAGE = "length of metavar tuple does not match nargs"
def do_test_no_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
def do_test_exception(self, nargs, metavar):
parser = argparse.ArgumentParser()
with self.assertRaises(ValueError) as cm:
parser.add_argument("--foo", nargs=nargs, metavar=metavar)
self.assertEqual(cm.exception.args[0], self.EXPECTED_MESSAGE)
# Unit tests for different values of metavar when nargs=None
def test_nargs_None_metavar_string(self):
self.do_test_no_exception(nargs=None, metavar="1")
def test_nargs_None_metavar_length0(self):
self.do_test_exception(nargs=None, metavar=tuple())
def test_nargs_None_metavar_length1(self):
self.do_test_no_exception(nargs=None, metavar=("1"))
def test_nargs_None_metavar_length2(self):
self.do_test_exception(nargs=None, metavar=("1", "2"))
def test_nargs_None_metavar_length3(self):
self.do_test_exception(nargs=None, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=?
def test_nargs_optional_metavar_string(self):
self.do_test_no_exception(nargs="?", metavar="1")
def test_nargs_optional_metavar_length0(self):
self.do_test_exception(nargs="?", metavar=tuple())
def test_nargs_optional_metavar_length1(self):
self.do_test_no_exception(nargs="?", metavar=("1"))
def test_nargs_optional_metavar_length2(self):
self.do_test_exception(nargs="?", metavar=("1", "2"))
def test_nargs_optional_metavar_length3(self):
self.do_test_exception(nargs="?", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=*
def test_nargs_zeroormore_metavar_string(self):
self.do_test_no_exception(nargs="*", metavar="1")
def test_nargs_zeroormore_metavar_length0(self):
self.do_test_exception(nargs="*", metavar=tuple())
def test_nargs_zeroormore_metavar_length1(self):
self.do_test_no_exception(nargs="*", metavar=("1"))
def test_nargs_zeroormore_metavar_length2(self):
self.do_test_no_exception(nargs="*", metavar=("1", "2"))
def test_nargs_zeroormore_metavar_length3(self):
self.do_test_exception(nargs="*", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=+
def test_nargs_oneormore_metavar_string(self):
self.do_test_no_exception(nargs="+", metavar="1")
def test_nargs_oneormore_metavar_length0(self):
self.do_test_exception(nargs="+", metavar=tuple())
def test_nargs_oneormore_metavar_length1(self):
self.do_test_no_exception(nargs="+", metavar=("1"))
def test_nargs_oneormore_metavar_length2(self):
self.do_test_no_exception(nargs="+", metavar=("1", "2"))
def test_nargs_oneormore_metavar_length3(self):
self.do_test_exception(nargs="+", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=...
def test_nargs_remainder_metavar_string(self):
self.do_test_no_exception(nargs="...", metavar="1")
def test_nargs_remainder_metavar_length0(self):
self.do_test_no_exception(nargs="...", metavar=tuple())
def test_nargs_remainder_metavar_length1(self):
self.do_test_no_exception(nargs="...", metavar=("1"))
def test_nargs_remainder_metavar_length2(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2"))
def test_nargs_remainder_metavar_length3(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=A...
def test_nargs_parser_metavar_string(self):
self.do_test_no_exception(nargs="A...", metavar="1")
def test_nargs_parser_metavar_length0(self):
self.do_test_exception(nargs="A...", metavar=tuple())
def test_nargs_parser_metavar_length1(self):
self.do_test_no_exception(nargs="A...", metavar=("1"))
def test_nargs_parser_metavar_length2(self):
self.do_test_exception(nargs="A...", metavar=("1", "2"))
def test_nargs_parser_metavar_length3(self):
self.do_test_exception(nargs="A...", metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=1
def test_nargs_1_metavar_string(self):
self.do_test_no_exception(nargs=1, metavar="1")
def test_nargs_1_metavar_length0(self):
self.do_test_exception(nargs=1, metavar=tuple())
def test_nargs_1_metavar_length1(self):
self.do_test_no_exception(nargs=1, metavar=("1"))
def test_nargs_1_metavar_length2(self):
self.do_test_exception(nargs=1, metavar=("1", "2"))
def test_nargs_1_metavar_length3(self):
self.do_test_exception(nargs=1, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=2
def test_nargs_2_metavar_string(self):
self.do_test_no_exception(nargs=2, metavar="1")
def test_nargs_2_metavar_length0(self):
self.do_test_exception(nargs=2, metavar=tuple())
def test_nargs_2_metavar_length1(self):
self.do_test_no_exception(nargs=2, metavar=("1"))
def test_nargs_2_metavar_length2(self):
self.do_test_no_exception(nargs=2, metavar=("1", "2"))
def test_nargs_2_metavar_length3(self):
self.do_test_exception(nargs=2, metavar=("1", "2", "3"))
# Unit tests for different values of metavar when nargs=3
def test_nargs_3_metavar_string(self):
self.do_test_no_exception(nargs=3, metavar="1")
def test_nargs_3_metavar_length0(self):
self.do_test_exception(nargs=3, metavar=tuple())
def test_nargs_3_metavar_length1(self):
self.do_test_no_exception(nargs=3, metavar=("1"))
def test_nargs_3_metavar_length2(self):
self.do_test_exception(nargs=3, metavar=("1", "2"))
def test_nargs_3_metavar_length3(self):
self.do_test_no_exception(nargs=3, metavar=("1", "2", "3"))
# ============================
# from argparse import * tests
# ============================
class TestImportStar(TestCase):
def test(self):
for name in argparse.__all__:
self.assertTrue(hasattr(argparse, name))
def test_all_exports_everything_but_modules(self):
items = [
name
for name, value in vars(argparse).items()
if not name.startswith("_")
if not inspect.ismodule(value)
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
def test_main():
# silence warnings about version argument - these are expected
with test_support.check_warnings(
('The "version" argument to ArgumentParser is deprecated.',
DeprecationWarning),
('The (format|print)_version method is deprecated',
DeprecationWarning)):
test_support.run_unittest(__name__)
# Remove global references to avoid looking like we have refleaks.
RFile.seen = {}
WFile.seen = set()
if __name__ == '__main__':
test_main()
|
the-stack_0_17964 | import os
import requests
url = "https://api.textlocal.in/send/"
def send_sms(phone, message):
params = {
"apikey": os.getenv("TEXTLOCAL_API_KEY"),
"numbers": phone,
"message": message,
"sender": "CTZNVS",
"test": True,
}
response = requests.get(url, params=params)
return response.text
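# Example usage (illustrative only -- the number and message below are made up, and a
# real TEXTLOCAL_API_KEY must be set in the environment for the request to succeed):
# print(send_sms("919999999999", "Your citizen services request has been received."))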
|
the-stack_0_17966 | import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, tier1
from ocs_ci.ocs.resources import pod
from ocs_ci.ocs.cluster import get_pg_balancer_status
log = logging.getLogger(__name__)
@tier1
@pytest.mark.polarion_id("OCS-2231")
class TestCephDefaultValuesCheck(ManageTest):
def test_ceph_default_values_check(self):
"""
This test compares the default Ceph values taken from OCS 4.3
against the current values in the cluster
"""
# The default ceph osd full ratio values
expected_full_ratios = {
'full_ratio': 0.85,
'backfillfull_ratio': 0.8,
'nearfull_ratio': 0.75
}
actual_full_ratios = {}
ct_pod = pod.get_ceph_tools_pod()
log.info("Checking the values of ceph osd full ratios in osd map")
osd_dump_dict = ct_pod.exec_ceph_cmd('ceph osd dump')
for ratio_parm, value in expected_full_ratios.items():
ratio_value = osd_dump_dict.get(ratio_parm)
actual_full_ratios[ratio_parm] = float(round(ratio_value, 2))
if not float(round(ratio_value, 2)) == value:
log.error(
f"Actual {ratio_parm} value is {ratio_value:.2f} NOT "
f"matching the expected value {value}"
)
assert expected_full_ratios == actual_full_ratios, (
"Actual full ratio values does not match expected full "
"ratio values"
)
log.info(
f"Actual full ratio {actual_full_ratios} values MATCHES expected "
f"full ratio values {expected_full_ratios}"
)
# Check if the osd full ratios satisfies condition
# "nearfull < backfillfull < full"
assert (
osd_dump_dict[
'nearfull_ratio'
] < osd_dump_dict[
'backfillfull_ratio'
] < osd_dump_dict[
'full_ratio'
]
), (
"osd full ratio values does not satisfy condition "
f"{osd_dump_dict['nearfull_ratio']:.2f} < "
f"{osd_dump_dict['backfillfull_ratio']:.2f} < "
f"{osd_dump_dict['full_ratio']:.2f}"
)
log.info(
"osd full ratio values satisfies condition "
f"{osd_dump_dict['nearfull_ratio']:.2f} < "
f"{osd_dump_dict['backfillfull_ratio']:.2f} < "
f"{osd_dump_dict['full_ratio']:.2f}"
)
# Check if PG balancer is active
assert get_pg_balancer_status(), "PG balancer is not active"
|
the-stack_0_17969 | import sys
import typing
import numpy as np
def set_val(
a: np.array,
i: int,
x: int,
) -> typing.NoReturn:
while i < a.size:
a[i] = max(a[i], x)
i += i & -i
def get_mx(
a: np.array,
i: int,
) -> int:
mx = 0
while i > 0:
mx = max(mx, a[i])
i -= i & -i
return mx
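
# Worked example (comments only): with a 1-indexed array a of size 5 filled with zeros,
# set_val(a, 2, 7) touches index 2 and then 2 + (2 & -2) = 4, giving a = [0, 0, 7, 0, 7];
# get_mx(a, 3) walks 3 -> 3 - (3 & -3) = 2 -> 0 and returns max(a[3], a[2]) = 7,
# i.e. the maximum over positions 1..3. This is the prefix-maximum Fenwick (BIT) used by solve().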
def solve(
n: int,
h: np.array,
a: np.array,
) -> typing.NoReturn:
fw = np.zeros(
n + 1,
dtype=np.int64,
)
mx = 0
for i in range(n):
v = get_mx(fw, h[i] - 1)
set_val(fw, h[i], v + a[i])
print(get_mx(fw, n))
def main() -> typing.NoReturn:
n = int(input())
h = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, h, a)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
sig = (i8, i8[:], i8[:])
get_mx = njit(get_mx)
set_val = njit(set_val)
cc.export(
fn.__name__,
sig,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
|
the-stack_0_17970 | #SwapDigitizerNumber.py
#This script swaps the tree references associating coil signal lines with a particular digitizer. When ACQ_216_1 died, we had to move the coils to ACQ_216_3. Now that ACQ_216_1 is resurrected, we need to switch the references back.
#
#Usage:
# python SwapDigitizerNumber.py [s1 [s2]]
#
# Default: Shot -1 (model tree)
# range between shots s1 and s2, including s1 and s2.
#Ted Golfinopoulos, 25 Apr 2012
from MDSplus import *
import sys #For getting command line arguments
import re #Import regular expressions.
#Parse command line arguments.
if(len(sys.argv)>1) :
s1=int(sys.argv[1]) #Grab shot number from command line.
else :
s1=-1 #Default to shot -1
if(len(sys.argv)>2) :
s2=int(sys.argv[2]) #Grab shot number from command line.
elif(s1==-1) :
s2=s1 #If s1 is the model tree, only do s=-1; don't run to s=0
else :
s2=s1 #Only do a single shot
digFrom='ACQ_216_3' #Change digitizer from this
digTo='ACQ_216_1' #Change digitizer to this
#Loop through range of shots
for s in range(s1,s2+1) :
tree=Tree('magnetics',s)
nodeArr=tree.getNode('active_mhd.signals').getNodeWild('BP*') #Grab all shoelace subnodes
#Loop through all nodes
for n in nodeArr :
#print(n)
try :
expr=n.getData()
#print(str(expr))
try :
if (len(re.findall(digFrom, str(expr)))>0) :#If there are matches, replace old digitizer name with new.
newExpr=re.sub(digFrom, digTo, str(expr)) #Need to to-string expression in order for regular expression to work.
#print(str(n) + ' -> ' + str(newExpr))
n.putData(Data.compile(newExpr)) #Put new expression into node.
print( str(n)+" --- Now contains: "+str(n.getData()) )
except : print("String replacement didn't work. Expr was "+str(expr))
except TreeNoDataException :
#Continue
print("No data in "+n.getPath()+"; moving on.")
|
the-stack_0_17971 | """SimulationOperator를 사용해서 시뮬레이션을 컨트롤하는 Simulator 클래스"""
import signal
import time
from . import (
LogManager,
Analyzer,
SimulationTrader,
SimulationDataProvider,
StrategyBuyAndHold,
StrategySma0,
SimulationOperator,
DateConverter,
)
class Simulator:
"""자동 거래 시뮬레이터 클래스
command_list:
{
guide: 화면에 출력될 명령어와 안내문
cmd: 입력 받은 명령어와 매칭되는 문자열
action: 명령어가 입력되었을때 실행되는 객체
}
config_list:
{
guide: 화면에 출력될 설정값과 안내문
value: 출력될 현재값
action: 입력 받은 설정값을 처리해주는 객체
}
"""
MAIN_STATEMENT = "input command (h:help): "
def __init__(
self,
budget=50000,
interval=2,
strategy=0,
from_dash_to="201220.170000-201220.180000",
currency="BTC",
):
self.logger = LogManager.get_logger("Simulator")
self.__terminating = False
self.start_str = "200430.170000"
self.end_str = "200430.180000"
self.interval = interval
self.operator = None
self.strategy = int(strategy)
self.budget = int(budget)
self.need_init = True
self.currency = currency
self.interval = float(self.interval)
start_end = from_dash_to.split("-")
self.start_str = start_end[0]
self.end_str = start_end[1]
self.command_list = [
{
"guide": "h, help print command info",
"cmd": ["help", "h"],
"action": self.print_help,
},
{
"guide": "r, run start running simulation",
"cmd": ["run", "r"],
"action": self.start,
},
{
"guide": "s, stop stop running simulation",
"cmd": ["stop", "s"],
"action": self._stop,
},
{
"guide": "t, terminate terminate simulator",
"cmd": ["terminate", "t"],
"action": self.terminate,
},
{
"guide": "i, initialize initialize simulation",
"cmd": ["initialize", "i"],
"action": self.initialize_with_command,
},
{
"guide": "1, state query operating state",
"cmd": ["1"],
"action": self._print_state,
},
{
"guide": "2, score query current score",
"cmd": ["2"],
"action": self._print_score,
},
{
"guide": "3, result query trading result",
"cmd": ["3"],
"action": self._print_trading_result,
},
]
self.config_list = [
{
"guide": "년월일.시분초 형식으로 시작 시점 입력. 예. 201220.162300",
"value": self.start_str,
"action": self._set_start_str,
},
{
"guide": "년월일.시분초 형식으로 종료 시점 입력. 예. 201220.162300",
"value": self.end_str,
"action": self._set_end_str,
},
{
"guide": "거래 간격 입력. 예. 1",
"value": self.interval,
"action": self._set_interval,
},
{
"guide": "예산 입력. 예. 50000",
"value": self.budget,
"action": self._set_budget,
},
{
"guide": "전략 번호 입력. 0: Buy and Hold, 1: SMA-0",
"value": self.strategy,
"action": self._set_strategy,
},
{
"guide": "화폐 코드 입력. BTC, ETH",
"value": self.currency,
"action": self._set_currency,
},
]
def initialize(self):
"""시뮬레이션 초기화"""
dt = DateConverter.to_end_min(self.start_str + "-" + self.end_str)
end = dt[0][1]
count = dt[0][2]
if self.strategy == 0:
strategy = StrategyBuyAndHold()
else:
strategy = StrategySma0()
strategy.is_simulation = True
self.operator = SimulationOperator()
self._print_configuration(strategy.NAME)
data_provider = SimulationDataProvider(currency=self.currency)
data_provider.initialize_simulation(end=end, count=count)
trader = SimulationTrader(currency=self.currency)
trader.initialize_simulation(end=end, count=count, budget=self.budget)
analyzer = Analyzer()
analyzer.is_simulation = True
self.operator.initialize(
data_provider,
strategy,
trader,
analyzer,
budget=self.budget,
)
self.operator.tag = self._make_tag(self.start_str, self.end_str, strategy.NAME)
self.operator.set_interval(self.interval)
self.need_init = False
@staticmethod
def _make_tag(start_str, end_str, strategy_name):
return "SIM-" + strategy_name + "-" + start_str + "-" + end_str
def start(self):
"""시뮬레이션 시작, 재시작"""
if self.operator is None or self.need_init:
self._print("초기화가 필요합니다")
return
self.logger.info("Simulation start! ============================")
if self.operator.start() is not True:
self._print("Fail operator start")
return
def stop(self, signum, frame):
"""시뮬레이션 중지"""
self._stop()
self.__terminating = True
self._print(f"Receive Signal {signum} {frame}")
self._print("Stop Singing")
def _stop(self):
if self.operator is not None:
self.operator.stop()
self.need_init = True
self._print("프로그램을 재시작하려면 초기화하세요")
def terminate(self):
"""시뮬레이터 종료"""
self._print("Terminating......")
self._stop()
self.__terminating = True
self._print("Good Bye~")
def run_single(self):
"""인터렉션 없이 초기 설정 값으로 단독 1회 실행"""
self.initialize()
self.start()
while self.operator.state == "running":
time.sleep(0.5)
self.terminate()
def main(self):
"""main 함수"""
signal.signal(signal.SIGINT, self.stop)
signal.signal(signal.SIGTERM, self.stop)
while not self.__terminating:
try:
key = input(self.MAIN_STATEMENT)
self.on_command(key)
except EOFError:
break
def on_command(self, key):
"""커맨드 처리"""
for cmd in self.command_list:
if key.lower() in cmd["cmd"]:
cmd["action"]()
return
self._print("invalid command")
def print_help(self):
"""가이드 문구 출력"""
self._print("command list =================")
for item in self.command_list:
self._print(item["guide"], True)
def initialize_with_command(self):
"""설정 값을 입력받아서 초기화 진행"""
for config in self.config_list:
self._print(config["guide"])
value = input(f"현재값: {config['value']} >> ")
value = config["value"] if value == "" else value
self._print(f"설정값: {value}")
config["action"](value)
self.initialize()
def _set_start_str(self, value):
self.start_str = value
def _set_end_str(self, value):
self.end_str = value
def _set_interval(self, value):
next_value = float(value)
if next_value > 0:
self.interval = next_value
def _set_budget(self, value):
next_value = int(value)
if next_value > 0:
self.budget = next_value
def _set_strategy(self, value):
self.strategy = int(value)
def _set_currency(self, value):
self.currency = value
def _print_state(self):
if self.operator is None:
self._print("초기화가 필요합니다")
return
self._print(self.operator.state)
def _print_configuration(self, strategy_name):
self._print("Simulation Configuration =====")
self._print(f"Simulation Period {self.start_str} ~ {self.end_str}")
self._print(f"Budget: {self.budget}, Interval: {self.interval}")
self._print(f"Strategy: {strategy_name}")
def _print_score(self):
def print_score_and_main_statement(score):
self._print("current score ==========")
self._print(score)
self._print(self.MAIN_STATEMENT)
self.operator.get_score(print_score_and_main_statement)
def _print_trading_result(self):
results = self.operator.get_trading_results()
if results is None or len(results) == 0:
self._print("거래 기록이 없습니다")
return
for result in results:
self._print(f"@{result['date_time']}, {result['type']}")
self._print(f"{result['price']} x {result['amount']}")
def _print(self, contents, logger_skip=False):
if logger_skip is not True:
self.logger.info(contents)
print(contents)
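
# Example usage (sketch): this module is normally driven from the package entry point,
# since the relative imports above prevent running it directly as a script.
#
#   simulator = Simulator(
#       budget=50000, interval=0.1, strategy=0,
#       from_dash_to="201220.170000-201220.180000", currency="BTC")
#   simulator.run_single()   # initialize, run once, terminate
#   # or simulator.main() for the interactive command loop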
|
the-stack_0_17972 | # Copyright 2017,2021 Niall McCarroll
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import os
import os.path
from time import sleep
from os.path import exists
import gc
import copy
from tests.test_utils import TestUtils
from treehaus import TreeHaus
class TestWorkload(unittest.TestCase):
def test_workload(self):
self.store = TestUtils.open()
self.index = self.store.getIndex("index1")
self.test = {} # mirror the index contents in a dict
num_versions = 20
num_traverses = 0
checkpoints = []
random.seed(21)
for v in range(0,num_versions):
TestUtils.alter(self.index,self.test,200,1,0.2)
checkpoint_number = self.store.commit()
if checkpoint_number:
checkpoints.append((checkpoint_number,copy.deepcopy(self.test)))
TestUtils.check(self.index,self.test)
TestUtils.traverse_check(self.index,self.test,None,None,False)
TestUtils.traverse_check(self.index,self.test,None,None,True)
for i in range(0,num_traverses):
                (lwb,upb) = TestUtils.make_key_pair(5)
TestUtils.traverse_check(self.index,self.test,lwb,upb,True)
TestUtils.traverse_check(self.index,self.test,lwb,None,True)
TestUtils.traverse_check(self.index,self.test,lwb,upb,False)
TestUtils.traverse_check(self.index,self.test,None,upb,True)
TestUtils.traverse_check(self.index,self.test,None,upb,False)
for (checkpoint_number, test_dict) in checkpoints:
with TreeHaus.open(TestUtils.PATH,openAtUpdate=checkpoint_number) as cp:
cp_index = cp.getIndex("index1")
TestUtils.check(cp_index,test_dict)
if __name__ == '__main__':
unittest.main() |
the-stack_0_17975 | '''
Copyright 2021 D3M Team
Copyright (c) 2021 DATA Lab at Texas A&M University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from d3m import container
from d3m.metadata import hyperparams
import imgaug.augmenters as iaa
import typing
from autovideo.utils import construct_primitive_metadata
from autovideo.base.augmentation_base import AugmentationPrimitiveBase
__all__ = ('ShearYPrimitive',)
Inputs = container.DataFrame
class Hyperparams(hyperparams.Hyperparams):
shear = hyperparams.Hyperparameter[typing.Union[float,tuple,list]](
default=(-20, 20),
description="Shear in degrees (NOT radians), i.e. expected value range is around [-360, 360], with reasonable values being in the range of [-45, 45].",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
seed = hyperparams.Constant[int](
default=0,
        description='Seed for the random state used by the augmenter',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
order = hyperparams.Hyperparameter[typing.Union[int,list]](
default=1,
description="interpolation order to use",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
cval = hyperparams.Hyperparameter[typing.Union[float,tuple,list]](
default=(0,255),
description=" The constant value to use when filling in newly created pixels.",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
mode = hyperparams.Hyperparameter[typing.Union[str,list]](
default='constant',
description="Method to use when filling in newly created pixels",
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
class ShearYPrimitive(AugmentationPrimitiveBase[Inputs, Hyperparams]):
"""
    A primitive which applies an affine shear along the y-axis to the input data.
"""
metadata = construct_primitive_metadata("augmentation", "geometric_ShearY")
def _get_function(self):
"""
        Set up the augmentation function and its parameters.
"""
shear = self.hyperparams["shear"]
seed = self.hyperparams["seed"]
order = self.hyperparams["order"]
cval = self.hyperparams["cval"]
mode = self.hyperparams["mode"]
return iaa.ShearY(shear=shear, seed=seed, order=order, cval=cval, mode=mode)
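
# Sketch (not part of the original file): the default hyperparameters above are
# equivalent to constructing the imgaug augmenter directly, e.g.
#
#   aug = iaa.ShearY(shear=(-20, 20), seed=0, order=1, cval=(0, 255), mode='constant')
#   sheared = aug(images=frames)   # frames: list/array of HxWxC uint8 images (assumed)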
|
the-stack_0_17977 | import random
from mesa import Agent
class Cop(Agent):
def __init__(self, unique_id, model, pos, vision):
super().__init__(unique_id, model)
self.breed = "cop"
self.pos = pos
self.vision = vision
self.can_arrest = True
self.arrested_step = 0
self.wait_for = 0 # no of steps to wait before arresting someone else
def step(self):
"""
Inspect local vision and arrest a random active agent. Move if
applicable.
"""
# check whether they can arrest again
if not self.can_arrest and self.wait_for == 0:
self.can_arrest = True
else:
self.wait_for -= 1
self.update_neighbors()
active_neighbors, deviant_neighbors, cop_neighbors = [], [], []
for agent in self.neighbors:
if (
agent.breed == "citizen"
and agent.condition == "Active"
and not agent.jail_sentence
):
active_neighbors.append(agent)
if agent.breed == "cop":
cop_neighbors.append(agent)
if (
agent.breed == "citizen"
and agent.condition == "Deviant"
and not agent.jail_sentence
):
deviant_neighbors.append(agent)
if (
self.can_arrest
and self.model.jail_capacity > len(self.model.jailed_agents)
and len(cop_neighbors) > 1
):
arrestee = None
if deviant_neighbors:
possibles = []
for agent in deviant_neighbors:
if agent.steps_active >= 3:
possibles.append(agent)
arrestee = self.random.choice(possibles) if possibles else None
elif active_neighbors:
possibles = []
for agent in active_neighbors:
if agent.steps_active >= 3:
possibles.append(agent)
arrestee = self.random.choice(possibles) if possibles else None
if arrestee:
arrestee.jail_sentence = True
self.model.arrested_agents.append(arrestee)
self.can_arrest = False
self.wait_for = 15
if self.model.movement and self.empty_neighbors and self.can_arrest:
useful_move = self.move_towards_actives()
if useful_move:
self.model.grid.move_agent(self, useful_move)
else:
self.model.grid.move_agent(
self, self.random.choice(self.empty_neighbors)
)
def move_towards_actives(self):
neighborhood = self.model.grid.get_neighborhood(
self.pos, moore=False, radius=self.vision
)
deviants, actives = [], []
for x in neighborhood:
neighbor = self.model.grid.get_cell_list_contents(x)
if neighbor and neighbor[0].breed == "citizen":
if neighbor[0].condition == "Deviant":
deviants.append(x)
if neighbor[0].condition == "Active":
actives.append(x)
if deviants:
toward = random.choice(deviants)
elif actives:
toward = random.choice(actives)
else:
return None
dict = {
"left": (self.pos[0] - 1, self.pos[1]),
"right": (self.pos[0] + 1, self.pos[1]),
"up": (self.pos[0], self.pos[1] - 1),
"down": (self.pos[0], self.pos[1] + 1),
}
new_pos = []
if toward:
if toward[0] > self.pos[0] and self.model.grid.is_cell_empty(
dict["right"]
): # citizen is more right than cop
new_pos.append("right")
elif toward[0] < self.pos[0] and self.model.grid.is_cell_empty(
dict["left"]
): # citizen is more left than cop
new_pos.append("left")
if toward[1] > self.pos[1] and self.model.grid.is_cell_empty(
dict["down"]
): # citizen is further down than cop
new_pos.append("down")
elif toward[1] < self.pos[1] and self.model.grid.is_cell_empty(
dict["up"]
): # citizen is further up than cop
new_pos.append("up")
new_pos = dict[random.choice(new_pos)] if new_pos else None
return new_pos
def update_neighbors(self):
"""
Look around and see who my neighbors are.
"""
self.neighborhood = self.model.grid.get_neighborhood(
self.pos, moore=False, radius=1
)
self.neighbors = self.model.grid.get_cell_list_contents(self.neighborhood)
self.empty_neighbors = [
c for c in self.neighborhood if self.model.grid.is_cell_empty(c)
]
|
the-stack_0_17978 | import warnings
import pytest
import numpy as np
from datetime import date
import dateutil
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas import (DatetimeIndex, Index, date_range, DataFrame,
Timestamp, offsets)
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndex(object):
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
@pytest.mark.parametrize('periods', [0, 9999, 10000, 10001])
def test_iteration_over_chunksize(self, periods):
# GH21012
index = date_range('2000-01-01 00:00:00', periods=periods, freq='min')
num = 0
for stamp in index:
assert index[num] == stamp
num += 1
assert num == len(index)
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
with warnings.catch_warnings(record=True):
# Deprecated - see GH20239
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self, join_type):
index = date_range('1/1/2000', periods=10)
joined = index.join(index, how=join_type)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1],
freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self, join_type):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:5, 0]
with tm.assert_raises_regex(ValueError,
'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join_type)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_factorize_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#13750
base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(res, base)
def test_factorize_dst(self):
# GH 13750
idx = pd.date_range('2016-11-06', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
idx = pd.date_range('2016-06-13', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
@pytest.mark.parametrize('arr, expected', [
(pd.DatetimeIndex(['2017', '2017']), pd.DatetimeIndex(['2017'])),
(pd.DatetimeIndex(['2017', '2017'], tz='US/Eastern'),
pd.DatetimeIndex(['2017'], tz='US/Eastern')),
])
def test_unique(self, arr, expected):
result = arr.unique()
tm.assert_index_equal(result, expected)
|
the-stack_0_17979 |
import numpy as np
import time
from openvino.inference_engine import IENetwork, IECore
import os
import cv2
import argparse
import sys
class Queue:
'''
Class for dealing with queues
'''
def __init__(self):
self.queues=[]
def add_queue(self, points):
self.queues.append(points)
def get_queues(self, image):
for q in self.queues:
x_min, y_min, x_max, y_max=q
frame=image[y_min:y_max, x_min:x_max]
yield frame
def check_coords(self, coords, frame):
d={k+1:0 for k in range(len(self.queues))}
for coord in coords:
for i, q in enumerate(self.queues):
if coord[0]>q[0] and coord[2]<q[2]:
d[i+1]+=1
#cv2.rectangle(frame, (coord[0], coord[1]), (coord[2], coord[3]), (0, 55, 255), 4)
return d, frame
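
# Example (sketch): a queue region is given as [x_min, y_min, x_max, y_max] and
# check_coords counts detections whose boxes fall horizontally inside each region.
#
#   queue = Queue()
#   queue.add_queue([100, 50, 400, 600])
#   counts, frame = queue.check_coords([[150, 80, 350, 500]], frame)
#   # counts == {1: 1}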
class PersonDetect:
'''
Class for the Person Detection Model.
'''
def __init__(self, model_name, device, threshold=0.60):
self.model_weights=model_name+'.bin'
self.model_structure=model_name+'.xml'
self.device=device
self.threshold=threshold
self.initial_w = ''
self.initial_h = ''
try:
self.model=IENetwork(self.model_structure, self.model_weights)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
self.input_name=next(iter(self.model.inputs))
self.input_shape=self.model.inputs[self.input_name].shape
self.output_name=next(iter(self.model.outputs))
self.output_shape=self.model.outputs[self.output_name].shape
def load_model(self):
self.core = IECore()
self.net = self.core.load_network(network=self.model, device_name=self.device, num_requests=1)
def predict(self, image):
input_img = self.preprocess_input(image)
input_dict = {self.input_name:input_img}
self.net.start_async(request_id=0,inputs=input_dict)
status = self.net.requests[0].wait(-1)
if status == 0:
results = self.net.requests[0].outputs[self.output_name]
image,coords = self.draw_outputs(results, image)
return coords,image
def draw_outputs(self, results, frame):
lst=[]
for obj in results[0][0]:
# Draw bounding box for object when it's probability is more than the specified threshold
if obj[2] > self.threshold:
xmin = int(obj[3] * self.initial_w)
ymin = int(obj[4] * self.initial_h)
xmax = int(obj[5] * self.initial_w)
ymax = int(obj[6] * self.initial_h)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 55, 255), 4)
c = [xmin,ymin,xmax,ymax]
lst.append(c)
return frame,lst
def preprocess_input(self, image):
n, c, h, w = self.input_shape
image = cv2.resize(image, (w, h))
image = image.transpose((2, 0, 1))
image = image.reshape((n, c, h, w))
return image
def main(args):
model=args.model
device=args.device
video_file=args.video
max_people=args.max_people
threshold=args.threshold
output_path=args.output_path
start_model_load_time=time.time()
pd= PersonDetect(model, device, threshold)
pd.load_model()
total_model_load_time = time.time() - start_model_load_time
queue=Queue()
try:
queue_param=np.load(args.queue_param)
for q in queue_param:
queue.add_queue(q)
except:
print("error loading queue param file")
try:
cap=cv2.VideoCapture(video_file)
except FileNotFoundError:
print("Cannot locate video file: "+ video_file)
except Exception as e:
print("Something else went wrong with the video file: ", e)
pd.initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
pd.initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
out_video = cv2.VideoWriter(os.path.join(output_path, 'output_video.mp4'), cv2.VideoWriter_fourcc(*'avc1'), fps, (pd.initial_w, pd.initial_h), True)
counter=0
start_inference_time=time.time()
try:
while cap.isOpened():
ret, frame=cap.read()
if not ret:
break
counter+=1
coords, image= pd.predict(frame)
num_people, image= queue.check_coords(coords,image)
print(f"Total People in frame = {len(coords)}")
print(f"Number of people in queue = {num_people}")
out_text=""
y_pixel=45
#cv2.putText(image, f"Total People in frame = {len(coords)}", (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 2)
for k, v in num_people.items():
out_text += f"No. of People in Queue {k} is {v} "
if v >= int(max_people):
out_text += f" Queue full; Please move to next Queue "
cv2.putText(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
out_text=""
y_pixel+=40
out_video.write(image)
total_time=time.time()-start_inference_time
total_inference_time=round(total_time, 1)
fps=counter/total_inference_time
with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
f.write(str(total_inference_time)+'\n')
f.write(str(fps)+'\n')
f.write(str(total_model_load_time)+'\n')
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print("Could not run Inference: ", e)
if __name__=='__main__':
parser=argparse.ArgumentParser()
parser.add_argument('--model', required=True)
parser.add_argument('--device', default='CPU')
parser.add_argument('--video', default=None)
parser.add_argument('--queue_param', default=None)
parser.add_argument('--output_path', default='/results')
parser.add_argument('--max_people', default=2)
parser.add_argument('--threshold', default=0.60)
args=parser.parse_args()
main(args) |
the-stack_0_17982 | # coding: utf-8
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def setup_and_run_tests(test_labels=None):
"""Discover and run project tests. Returns number of failures."""
test_labels = test_labels or ['fack.tests']
# noinspection PyStringFormat
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1)
return test_runner.run_tests(test_labels)
def runtests(test_labels=None):
"""Run project tests and exit"""
# Used as setup test_suite: must either exit or return a TestSuite
failures = setup_and_run_tests(test_labels)
sys.exit(bool(failures))
if __name__ == '__main__':
runtests(test_labels=sys.argv[1:])
|
the-stack_0_17983 | import argparse
from collections import deque
def word_generator(productions, init_symbol_1, init_symbol_2, input_symbol, counter):
q = deque([init_symbol_1])
st = set()
productions_list = list()
result_productions_dict = dict()
result_rules_dict = dict()
while len(q):
word = q.popleft()
if word not in st:
st.add(word)
if all(c == input_symbol for c in word):
if counter == len(word):
prime_number_word = word
result_productions_dict[prime_number_word] = ''
result_rules_dict[prime_number_word] = 'Word was applied'
productions_list.reverse()
for lp, rp, left, right in productions_list:
if rp in result_productions_dict.keys():
result_productions_dict[lp] = rp
result_rules_dict[lp] = left + ' -> ' + right
result_file = open('./prime_checker_result.txt', 'w')
for key, value in result_productions_dict.items().__reversed__():
result_file.write('Applied rule: ' + result_rules_dict[key] + '\nResult replacement: ' + key + ' -> ' + value + '\n\n')
result_file.close()
else:
result_file = open('./prime_checker_result.txt', 'w')
result_file.write(f'{counter} is not a prime number')
result_file.close()
yield word
else:
for left, right in productions:
if left in word:
new_word = word.replace(left, right)
productions_list.append((word, new_word, left, right))
if any(S in new_word for S in [init_symbol_1, init_symbol_2]):
q.append(new_word)
else:
q.appendleft(new_word)
def read_free_grammar(path):
grammar = open(path)
str_productions = [line.strip('\n') for line in grammar.readlines()]
productions = []
for line in str_productions:
line = line.split(' -> ')
productions += [tuple(line)] if len(line) > 1 else [(line[0], '')]
grammar.close()
return productions
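
# Sketch of the expected grammar file layout (hypothetical productions, not the actual
# free_prime_grammar.txt): one production per line, sides separated by " -> "; a line
# without " -> " is read as an erasing production with an empty right-hand side.
#
#   First -> Second
#   SecondI -> ISecond
#
# read_free_grammar() turns each such line into a (left, right) tuple.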
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--grammar_path", help="Path to file with grammar",
type=str, default="./free_prime_grammar.txt")
parser.add_argument("-n", help="Number to check", type=int)
args = parser.parse_args()
productions = read_free_grammar(args.grammar_path)
gen = word_generator(productions, 'First', 'Second', 'I', args.n)
is_end = False
is_prime = False
while not is_end:
next_word = gen.__next__()
is_end = len(next_word) >= args.n
is_prime = len(next_word) == args.n
if is_prime:
print(f'{args.n} is a prime number')
else:
print(f'{args.n} is not a prime number')
if __name__ == '__main__':
main() |
the-stack_0_17984 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-name-in-module,import-error
from azure.cli.core.commands import CliCommandType
from azure.cli.core.commands.arm import deployment_validate_table_format
from ._client_factory import cf_container_services
from ._client_factory import cf_managed_clusters
from ._client_factory import cf_agent_pools
from ._client_factory import cf_openshift_managed_clusters
from ._format import aks_list_table_format
from ._format import aks_show_table_format
from ._format import aks_agentpool_show_table_format
from ._format import aks_agentpool_list_table_format
from ._format import osa_list_table_format
from ._format import aks_upgrades_table_format
from ._format import aks_versions_table_format
# pylint: disable=too-many-statements
def load_command_table(self, _):
container_services_sdk = CliCommandType(
operations_tmpl='azure.mgmt.containerservice.v2017_07_01.operations.'
'_container_services_operations#ContainerServicesOperations.{}',
client_factory=cf_container_services
)
managed_clusters_sdk = CliCommandType(
operations_tmpl='azure.mgmt.containerservice.v2020_09_01.operations.'
'_managed_clusters_operations#ManagedClustersOperations.{}',
client_factory=cf_managed_clusters
)
agent_pools_sdk = CliCommandType(
operations_tmpl='azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.'
'operations._agent_pools_operations#AgentPoolsOperations.{}',
client_factory=cf_managed_clusters
)
openshift_managed_clusters_sdk = CliCommandType(
operations_tmpl='azure.mgmt.containerservice.v2018_09_30_preview.operations.'
'_open_shift_managed_clusters_operations#OpenShiftManagedClustersOperations.{}',
client_factory=cf_openshift_managed_clusters
)
# ACS base commands
# TODO: When the first azure-cli release after January 31, 2020 is planned, add
# `expiration=<CLI core version>` to the `self.deprecate()` args below.
deprecate_info = self.deprecate(redirect='aks', hide=True)
with self.command_group('acs', container_services_sdk, deprecate_info=deprecate_info,
client_factory=cf_container_services) as g:
g.custom_command('browse', 'acs_browse')
g.custom_command('create', 'acs_create', supports_no_wait=True,
table_transformer=deployment_validate_table_format)
g.command('delete', 'delete', confirmation=True)
g.custom_command('list', 'list_container_services')
g.custom_command('list-locations', 'list_acs_locations')
g.custom_command('scale', 'update_acs')
g.show_command('show', 'get')
g.wait_command('wait')
# ACS Mesos DC/OS commands
with self.command_group('acs dcos', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('browse', 'dcos_browse')
g.custom_command('install-cli', 'dcos_install_cli', client_factory=None)
# ACS Kubernetes commands
with self.command_group('acs kubernetes', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('browse', 'k8s_browse')
g.custom_command('get-credentials', 'k8s_get_credentials')
g.custom_command('install-cli', 'k8s_install_cli', client_factory=None)
# AKS commands
with self.command_group('aks', managed_clusters_sdk, client_factory=cf_managed_clusters) as g:
g.custom_command('browse', 'aks_browse')
g.custom_command('create', 'aks_create', supports_no_wait=True)
g.custom_command('update', 'aks_update', supports_no_wait=True)
g.command('delete', 'delete', supports_no_wait=True, confirmation=True)
g.custom_command('update-credentials', 'aks_update_credentials', supports_no_wait=True)
g.custom_command('disable-addons', 'aks_disable_addons', supports_no_wait=True)
g.custom_command('enable-addons', 'aks_enable_addons', supports_no_wait=True)
g.custom_command('get-credentials', 'aks_get_credentials')
g.custom_command('check-acr', 'aks_check_acr')
g.command('get-upgrades', 'get_upgrade_profile', table_transformer=aks_upgrades_table_format)
g.custom_command('install-cli', 'k8s_install_cli', client_factory=None)
g.custom_command('list', 'aks_list', table_transformer=aks_list_table_format)
g.custom_command('remove-dev-spaces', 'aks_remove_dev_spaces', deprecate_info=g.deprecate())
g.custom_command('scale', 'aks_scale', supports_no_wait=True)
g.custom_show_command('show', 'aks_show', table_transformer=aks_show_table_format)
g.custom_command('upgrade', 'aks_upgrade', supports_no_wait=True)
g.custom_command('use-dev-spaces', 'aks_use_dev_spaces', deprecate_info=g.deprecate())
g.custom_command('rotate-certs', 'aks_rotate_certs', supports_no_wait=True,
confirmation='Kubernetes will be unavailable during certificate rotation process.\n' +
'Are you sure you want to perform this operation?')
g.wait_command('wait')
g.command('stop', 'stop', supports_no_wait=True)
g.command('start', 'start', supports_no_wait=True)
with self.command_group('aks', container_services_sdk, client_factory=cf_container_services) as g:
g.custom_command('get-versions', 'aks_get_versions', table_transformer=aks_versions_table_format)
# AKS agent pool commands
with self.command_group('aks nodepool', agent_pools_sdk, client_factory=cf_agent_pools) as g:
g.custom_command('list', 'aks_agentpool_list', table_transformer=aks_agentpool_list_table_format)
g.custom_show_command('show', 'aks_agentpool_show', table_transformer=aks_agentpool_show_table_format)
g.custom_command('add', 'aks_agentpool_add', supports_no_wait=True)
g.custom_command('scale', 'aks_agentpool_scale', supports_no_wait=True)
g.custom_command('upgrade', 'aks_agentpool_upgrade', supports_no_wait=True)
g.custom_command('update', 'aks_agentpool_update', supports_no_wait=True)
g.custom_command('delete', 'aks_agentpool_delete', supports_no_wait=True)
g.custom_command('get-upgrades', 'aks_agentpool_get_upgrade_profile')
# OSA commands
with self.command_group('openshift', openshift_managed_clusters_sdk,
client_factory=cf_openshift_managed_clusters) as g:
g.custom_command('create', 'openshift_create', supports_no_wait=True)
g.command('delete', 'delete', supports_no_wait=True, confirmation=True)
g.custom_command('scale', 'openshift_scale', supports_no_wait=True)
g.custom_show_command('show', 'openshift_show')
g.custom_command('list', 'osa_list', table_transformer=osa_list_table_format)
g.wait_command('wait')
# OSA monitor subgroup
with self.command_group('openshift monitor', openshift_managed_clusters_sdk,
client_factory=cf_openshift_managed_clusters) as g:
g.custom_command('enable', 'openshift_monitor_enable', supports_no_wait=True)
g.custom_command('disable', 'openshift_monitor_disable', supports_no_wait=True)
|
the-stack_0_17985 | # coding :utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#from .market_config import stock_market,future_market,HK_stock_market,US_stock_market
from QUANTAXIS.QAUtil import QA_util_log_info, QA_util_random_with_topic
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, TRADE_STATUS
"""撮合类
输入是
self.market_data
self.order
rules
输出是
standard message
"""
class commission():
if_buyside_commission = False
if_sellside_commission = True
if_commission = if_buyside_commission and if_sellside_commission
class dealer_preset():
def __init__(self, market_type, *args, **kwargs):
self.market_type = market_type
        self.if_price_limit = None  # whether daily price limits apply (not for US stocks / crypto)
        self.if_commission = None  # whether commission is charged (some futures / crypto charge none)
        self.if_tax = None  # whether tax is charged
        self.if_t0 = None  # whether T+0 trading is allowed
        self.if_sellopen = None  # whether short selling is allowed
        self.trading_time = None  # trading hours
        self.commission_coeff = None  # commission rate
        self.tax_coeff = None  # tax rate
def load_preset(self):
if self.market_type is MARKET_TYPE.STOCK_CN:
            self.if_price_limit = True  # whether daily price limits apply (not for US stocks / crypto)
            self.if_commission = True  # whether commission is charged (some futures / crypto charge none)
            self.if_tax = True  # whether tax is charged
            self.if_t0 = False  # whether T+0 trading is allowed
            self.if_sellopen = False  # whether short selling is allowed
            self.trading_time = [[930, 1130], [1300, 1500]]  # trading hours
            self.commission_coeff = 0.00025  # commission rate
            self.tax_coeff = 0.001  # tax rate
return self
elif self.market_type is MARKET_TYPE.FUTURE_CN:
            self.if_price_limit = True  # whether daily price limits apply (not for US stocks / crypto)
            self.if_commission = True  # whether commission is charged (some futures / crypto charge none)
            self.if_tax = False  # whether tax is charged
            self.if_t0 = True  # whether T+0 trading is allowed
            self.if_sellopen = True  # whether short selling is allowed
            self.trading_time = [[930, 1130], [1300, 1500]]  # trading hours
            self.commission_coeff = 0.00025  # commission rate
            self.tax_coeff = 0  # tax rate
else:
pass
return self
class QA_Dealer():
"""[summary]
对于不同的市场规则:
股票市场 t+1
期货/期权/加密货币市场 t+0
股票/加密货币市场不允许卖空
期货/期权市场允许卖空
t+1的市场是
当日的买入 更新持仓- 不更新可卖数量- 资金冻结
当日的卖出 及时更新可用资金
t+0市场是:
当日买入 即时更新持仓和可卖
当日卖出 即时更新
卖空的规则是
允许无仓位的时候卖出证券(按市值和保证金比例限制算)
"""
def __init__(self, commission_fee_coeff=0.00025, tax_coeff=0.001, *args, **kwargs):
self.commission_fee_coeff = commission_fee_coeff
self.tax_coeff = tax_coeff
self.deal_name = ''
self.deal_engine = {'0x01': self.backtest_stock_dealer}
self.session = {}
self.order = None
self.market_data = None
self.commission_fee = None
self.tax = None
self.status = None
def deal(self, order, market_data):
self.order = order
self.market_data = market_data
self.deal_price = 0
self.deal_amount = 0
self.commission_fee_coeff=order.commission_coeff
self.tax_coeff=order.tax_coeff
if order.market_type is MARKET_TYPE.STOCK_CN:
return self.backtest_stock_dealer()
def callback_message(self):
        # this is the standard message returned back to the caller
message = {
'header': {
'source': 'market',
'status': self.status,
'code': self.order.code,
'session': {
'user': self.order.user,
'strategy': self.order.strategy,
'account': self.order.account_cookie
},
'order_id': self.order.order_id,
'trade_id': QA_util_random_with_topic('Trade')
},
'body': {
'order': {
'price': float("%.2f" % float(self.deal_price)),
'code': self.order.code,
'amount': self.deal_amount,
'date': self.order.date,
'datetime': self.order.datetime,
'towards': self.order.towards
},
# 'market': {
# 'open': self.market_data.get('open'),
# 'high': self.market_data.get('high'),
# 'low': self.market_data.get('low'),
# 'close': self.market_data.get('close'),
# 'volume': self.market_data.get('volume'),
# 'code': self.market_data.get('code')
# },
'fee': {
'commission': self.commission_fee,
'tax': self.tax
}
}
}
return message
def cal_fee(self):
if self.order.market_type is MARKET_TYPE.STOCK_CN:
if int(self.order.towards) > 0:
commission_fee = self.commission_fee_coeff * \
float(self.deal_price) * float(self.order.amount)
self.commission_fee = 5 if commission_fee < 5 else commission_fee
                self.tax = 0  # no stamp tax on buys
else:
commission_fee = self.commission_fee_coeff * \
float(self.deal_price) * float(self.order.amount)
self.commission_fee = 5 if commission_fee < 5 else commission_fee
self.tax = self.tax_coeff * \
float(self.deal_price) * float(self.order.amount)
elif self.order.market_type is MARKET_TYPE.FUTURE_CN:
            # futures are not taxed
            # commission is charged on both sides, and there is no minimum commission
self.commission_fee = self.commission_fee_coeff * \
float(self.deal_price) * float(self.order.amount)
#self.commission_fee = 5 if commission_fee < 5 else commission_fee
            self.tax = 0  # no stamp tax on buys
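
        # Worked example (stock branch above): buying 1000 shares at 10.0 gives
        # commission = 0.00025 * 10.0 * 1000 = 2.5, bumped up to the 5.0 minimum, with no tax;
        # selling the same lot pays commission 5.0 plus stamp tax 0.001 * 10.0 * 1000 = 10.0.
        # Futures (this branch) pay only coeff * price * amount, with no minimum and no tax.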
def backtest_stock_dealer(self):
        # adds a __commission_fee_coeff commission-rate coefficient
"""MARKET ENGINE STOCK
        After receiving the market data, decide whether the order can be matched and generate the fill information
trading system
step1: check self.market_data
step2: deal
step3: return callback
"""
try:
if float(self.market_data.get('open')) == float(self.market_data.get('high')) == float(self.market_data.get('close')) == float(self.market_data.get('low')):
self.status = TRADE_STATUS.PRICE_LIMIT
self.deal_price = 0
self.deal_amount = 0
self.cal_fee()
return self.callback_message()
elif ((float(self.order.price) < float(self.market_data.get('high')) and
float(self.order.price) > float(self.market_data.get('low'))) or
float(self.order.price) == float(self.market_data.get('low')) or
float(self.order.price) == float(self.market_data.get('high'))):
                'the trade can be filled; a slippage adjustment applies'
if float(self.order.amount) < float(self.market_data.get('volume')) * 100 / 16:
self.deal_price = self.order.price
self.deal_amount = self.order.amount
elif float(self.order.amount) >= float(self.market_data.get('volume')) * 100 / 16 and \
float(self.order.amount) < float(self.market_data.get('volume')) * 100 / 8:
"""
                    add some slippage
buy_price=mean(max{open,close},high)
sell_price=mean(min{open,close},low)
"""
if int(self.order.towards) > 0:
self.deal_price = (max(float(self.market_data.get('open')), float(
self.market_data.get('close'))) + float(self.market_data.get('high'))) * 0.5
else:
self.deal_price = (min(float(self.market_data.get('open')), float(
self.market_data.get('close'))) + float(self.market_data.get('low'))) * 0.5
self.deal_amount = self.order.amount
else:
self.deal_amount = float(self.market_data.get('volume')) / 8
if int(self.order.towards) > 0:
self.deal_price = float(self.market_data.get('high'))
else:
self.deal_price = float(self.market_data.get('low'))
self.cal_fee()
self.status = TRADE_STATUS.SUCCESS
return self.callback_message()
else:
self.status = TRADE_STATUS.FAILED
self.deal_price = 0
self.deal_amount = 0
self.cal_fee()
return self.callback_message()
except Exception as e:
QA_util_log_info('MARKET ENGINE ERROR: {}'.format(e))
self.status = TRADE_STATUS.NO_MARKET_DATA
return self.callback_message()
class Stock_Dealer(QA_Dealer):
def __init__(self, *args, **kwargs):
super().__init__()
if __name__ == '__main__':
pass
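
    # Hedged sketch (not in the original file): exercising the dealer with a hand-built
    # order stand-in and one bar of market data. The attribute names mirror what deal()
    # and callback_message() read above; a real QUANTAXIS QA_Order would normally be used.
    from types import SimpleNamespace
    fake_order = SimpleNamespace(
        price=10.0, amount=1000, towards=1, market_type=MARKET_TYPE.STOCK_CN,
        commission_coeff=0.00025, tax_coeff=0.001, code='000001',
        user='user', strategy='strategy', account_cookie='acc', order_id='order-1',
        date='2018-01-02', datetime='2018-01-02 10:00:00')
    bar = {'open': 9.9, 'high': 10.2, 'low': 9.8,
           'close': 10.1, 'volume': 100000, 'code': '000001'}
    print(QA_Dealer().deal(fake_order, bar))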
|
the-stack_0_17987 | from instrument import Instrument
from visa import VisaIOError
import visa
import types
import logging
import numpy as np
import qt
class FSV_Exception(Exception):
pass
class RhodeSchwartz_FSV(Instrument):
'''
This is the driver for the Rohde & Schwarz FSV Signal Analyzer.
Usage:
Initialize with
<name> = qt.instruments.create('<name>', 'RhodeSchwartz_FSV',
address='TCPIP::<IP-address>::INSTR',
reset=<bool>,)
For GPIB the address is: 'GPIB<interface_nunmber>::<gpib-address>'
'''
def __init__(self, name, address, reset=False):
# Initialize wrapper functions
        logging.info('Initializing instrument Rohde & Schwarz FSV Signal Analyzer')
Instrument.__init__(self, name, tags=['physical'])
# Add some global constants
self._address = address
self._default_timeout = 2000 # ms
self._visainstrument = visa.ResourceManager().open_resource(self._address,
timeout=self._default_timeout)
self._freq_unit = 1
self._freq_unit_symbol = 'Hz'
# Add parameters
self.add_parameter('centerfrequency', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=10, maxval=13.6e9,
units='Hz')
self.add_parameter('span', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=0, maxval=13.6e9,
units='Hz')
self.add_parameter('referencelevel', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=-130, maxval=0,
units='dBm', format='%.04e')
self.add_parameter('mode', type=types.StringType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
format_map = {
"SAN" : "Spectrum",
"IQ" : "IQ Analyzer",
"PNO" : "Phase Noise"
})
self.add_parameter('continuous_sweep', type=types.BooleanType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET)
self.add_parameter('sweep_points', type=types.IntType,
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
minval=101, maxval=32001)
self.add_parameter('bandwidth', type=types.FloatType,
flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET,
minval=1, maxval=10e6,
units='Hz', format='%d')
def get_instrument(self):
return self._visainstrument
def reset(self):
self._visainstrument.write('*RST')
def markers_to_peaks(self, no_of_peaks=3):
for i in range(8):
self._visainstrument.write('CALC:MARK%d OFF' % (i+1))
for i in range(no_of_peaks):
self._visainstrument.write('CALC:MARK%d ON' % (i+1))
def marker_to_max(self):
self.markers_to_peaks(1)
def set_marker_frequency(self, freq):
self._visainstrument.write('CALC:MARK1:X %dHz' % freq+';*WAI')
def set_markerN_frequency(self,n, freq):
self._visainstrument.write('CALC:MARK%d:X %dHz' %(n, freq))
def marker_next(self, marker=1):
if not int(self._visainstrument.query('CALC:MARK%d?' % (marker)).strip()):
raise FSV_Exception('Marker %d is not on' % (marker))
self._visainstrument.write('CALC:MARK%d:MAX:NEXT' % marker)
def get_max_freqs(self, no_of_peaks=3):
xvals = []
yvals = []
for i in range(no_of_peaks):
if not int(self._visainstrument.query('CALC:MARK%d?' % (i+1)).strip()):
raise FSV_Exception('Marker %d is not on' % (i+1))
xvals.append(float(self._visainstrument.query('CALC:MARK%d:X?' % (i+1)).strip()))
yvals.append(float(self._visainstrument.query('CALC:MARK%d:Y?' % (i+1)).strip()))
return xvals, yvals
# communication with machine
def do_get_centerfrequency(self):
'''
Get center frequency from device
Input:
None
Output:
centerfrequency (float) : center frequency in Hz
'''
logging.debug(__name__ + ' : reading center frequency from instrument')
return float(self._visainstrument.ask('FREQ:CENT?'))
def do_set_centerfrequency(self, centerfrequency):
'''
Set center frequency of device
Input:
centerfrequency (float) : center frequency in Hz
Output:
None
'''
logging.debug(__name__ + ' : setting center frequency to %s Hz' % centerfrequency)
self._visainstrument.write('FREQ:CENT %f' % centerfrequency+';*WAI')
def do_get_span(self):
'''
Get span from device
Input:
None
Output:
span (float) : span in Hz
'''
logging.debug(__name__ + ' : reading span from instrument')
return float(self._visainstrument.ask('FREQ:SPAN?'))
def do_set_span(self,span):
'''
Set span of device
Input:
span (float) : span in Hz
Output:
None
'''
logging.debug(__name__ + ' : setting span to %s Hz' % span)
self._visainstrument.write('FREQ:SPAN %e' % span)
def do_get_referencelevel(self):
'''
Get reference level from device
Input:
None
Output:
referencelevel (float) : reference level in dBm
'''
logging.debug(__name__ + ' : reading referencelevel from instrument')
return float(self._visainstrument.ask('DISP:TRAC:Y:RLEV?'))
def do_set_referencelevel(self,referencelevel):
'''
Set referencelevel of device
Input:
referencelevel (float) : reference level in dBm(??)
Output:
None
'''
logging.debug(__name__ + ' : setting referencelevel to %s dBm' % referencelevel)
self._visainstrument.write('DISP:TRAC:Y:RLEV %e' % referencelevel)
def do_get_mode(self):
'''
Get mode from device
Input:
None
Output:
            mode (string) : current analyzer mode (SAN, IQ or PNO)
'''
logging.debug(__name__ + ' : reading mode from instrument')
return self._visainstrument.ask('INST?').strip()
def do_set_mode(self,mode):
'''
Set mode of device
Input:
            mode (string) : analyzer mode (SAN, IQ or PNO)
Output:
None
'''
logging.debug(__name__ + ' : setting sweep_mode to %s' % mode)
self._visainstrument.write('INST %s' % mode)
def do_get_continuous_sweep(self):
'''
Get continuous_sweep from device
Input:
None
Output:
            continuous_sweep (bool) : whether continuous sweep is enabled
'''
logging.debug(__name__ + ' : reading continuous_sweep from instrument')
return int(self._visainstrument.ask('INIT:CONT?').strip())
def do_set_continuous_sweep(self, continuous_sweep):
'''
Set continuous_sweep of device
Input:
            continuous_sweep (bool) : continuous sweep on/off
Output:
None
'''
logging.debug(__name__ + ' : setting continuous_sweep to %r' % continuous_sweep)
if continuous_sweep:
string = 'ON'
else:
string = 'OFF'
self._visainstrument.write('INIT:CONT %s' % string)
def do_get_sweep_points(self):
'''
Get sweep_points from device
Input:
None
Output:
            sweep_points (int) : number of sweep points
'''
logging.debug(__name__ + ' : reading sweep_points from instrument')
return int(self._visainstrument.ask('SWE:POIN?').strip())
def get_sweep_time(self):
'''
Get the sweep time in Seconds
'''
logging.debug(__name__ + ' : reading sweep_time from instrument')
return float(self._visainstrument.ask('SWE:TIME?').strip())
def do_set_sweep_points(self, sweep_points):
'''
Set sweep_points of device
Input:
            sweep_points (int) : number of sweep points
Output:
None
'''
logging.debug(__name__ + ' : setting sweep_points to %d' % sweep_points)
self._visainstrument.write('SWE:POIN %d' % sweep_points)
def do_get_bandwidth(self):
'''
Get bandwidth from device
Input:
None
Output:
            bandwidth (float) : resolution bandwidth in Hz
'''
logging.debug(__name__ + ' : reading bandwidth from instrument')
return int(self._visainstrument.ask('BAND?').strip())
def do_set_bandwidth(self, bandwidth):
'''
Set bandwidth of device
Input:
            bandwidth (float) : resolution bandwidth in Hz
Output:
None
'''
logging.debug(__name__ + ' : setting bandwidth to %d' % bandwidth)
self._visainstrument.write('BAND %d' % bandwidth)
def wait_till_complete(self):
try:
self._visainstrument.query('*ESR?')
self._visainstrument.write('*OPC')
sweeptime=self.get_sweep_time()*self.get_sweep_count()
            qt.msleep(max(sweeptime - 2., 0.))
while int(self._visainstrument.query('*ESR?').strip())%2==0:
qt.msleep(0.1)
except VisaIOError:
print ('FSV timed out. It may be preparing the sweep.\nPress enter to start the sweep.')
raw_input()
self.run_single(wait=True)
except KeyboardInterrupt:
raise Exception('Interrupted in middle of sweep')
def get_data(self):
logging.debug(__name__ + ' : fetching data')
center = self.get_centerfrequency()
span = self.get_span()
npoints = self.get_sweep_points()
#self.run_single(wait=True)
xvals = np.linspace(center-span/2.0, center+span/2.0, npoints)
yvals = self._visainstrument.query('TRAC? TRACE1').split(',')
yvals = map(float,yvals)
return xvals, yvals
def run_single(self, wait=False):
'''
Trigger a single Sweep
'''
self._visainstrument.write('INIT:CONT OFF')
self._visainstrument.write('INIT;*WAI')
if wait:
self.wait_till_complete()
def set_sweep_mode_avg(self, mode = 'LIN'):
logging.debug(__name__ + ' : setting mode to AVG')
self._visainstrument.write('DISP:TRAC:MODE AVER')
self._visainstrument.write('SENS:AVER:TYPE %s'%mode)
def set_sweep_count(self, counts):
logging.debug(__name__ + ' : setting sweep count to %d'%counts)
self._visainstrument.write('SWE:COUN %s'%counts)
def get_sweep_count(self):
        # logging.debug(__name__ + ' : reading sweep count from instrument')
return int(self._visainstrument.ask('SWE:COUN?'))
def w(self, string):
return self._visainstrument.write(string)
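# Illustrative usage sketch (comments only, not executed): it assumes this driver
# class has been registered with QTLab under a hypothetical name 'RS_FSV' and that
# the analyzer answers at the given VISA address; none of these names are defined
# in this file.
#
#   fsv = qt.instruments.create('fsv', 'RS_FSV', address='TCPIP::192.168.1.10::INSTR')
#   fsv.set_centerfrequency(5e9)       # 5 GHz center
#   fsv.set_span(100e6)                # 100 MHz span
#   fsv.set_bandwidth(1e3)             # 1 kHz resolution bandwidth
#   fsv.run_single(wait=True)          # trigger one sweep and block until it finishes
#   freqs, powers = fsv.get_data()     # frequency axis (Hz) and trace (dBm)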
|
the-stack_0_17989 | import random
from itertools import product
from .common import extract_prime_power
from .modular import solve_crt, invmod
def has_sqrtmod(a, factors=None):
"""
Check if @a is quadratic residue, factorization needed
    @factors - dict mapping prime -> power, e.g. {2: 3, 5: 1}
"""
if not factors:
raise ValueError("Factors can't be empty: %s" % factors)
for p, k in factors.items():
if p <= 1 or k <= 0:
raise ValueError("Not valid prime power: %s**%s" % (p, k))
if not has_sqrtmod_prime_power(a, p, k):
return False
return True
def sqrtmod(a, factors):
"""
x ^ 2 = a (mod *factors).
Yield square roots by product of @factors as modulus.
    @factors - dict mapping prime -> power, e.g. {2: 3, 5: 1}
"""
coprime_factors = [p ** k for p, k in factors.items()]
sqrts = []
for i, (p, k) in enumerate(factors.items()):
# it's bad that all roots by each modulus are calculated here
# - we can start yielding roots faster
sqrts.append(
list(sqrtmod_prime_power(a % coprime_factors[i], p, k))
)
for rems in product(*sqrts):
yield solve_crt(rems, coprime_factors)
return
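# Illustrative example (doctest-style comment, not executed): square roots of 1
# modulo 12, with the factorization of the modulus passed as a dict {prime: power}.
#
#   >>> sorted(sqrtmod(1, {2: 2, 3: 1}))
#   [1, 5, 7, 11]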
def has_sqrtmod_prime_power(a, p, n=1):
"""
Check if @a (mod @p**@n) is quadratic residue, @p is prime.
"""
if p < 2:
raise ValueError("Prime must be greater than 1: " + str(p))
if n < 1:
raise ValueError("Prime power must be positive: " + str(n))
a = a % (p ** n)
if a in (0, 1):
return True
e, a = extract_prime_power(a, p)
if e:
if e & 1:
return False
else:
return has_sqrtmod_prime_power(a, p, n)
if p == 2: # power of 2
return a % 8 == 1
return jacobi(a, p) == 1
def sqrtmod_prime_power(a, p, k=1):
"""
Yield square roots of @a mod @p**@k,
@p - prime
@k >= 1
"""
if k < 1:
raise ValueError("prime power k < 1: %d" % k)
powers = [1]
pow_p = 1
for i in range(k):
pow_p *= p
powers.append(pow_p)
# x**2 == a (mod p), p is prime
def sqrtmod_prime(a, p):
if a == 0:
return (0,)
if a == 1:
return (1, p-1) if p != 2 else (1,)
if jacobi(a, p) == -1:
raise ValueError("No square root for %d (mod %d)" % (a, p))
while True:
b = random.randint(1, p - 1)
if jacobi(b, p) == -1:
break
pow2, t = extract_prime_power(p - 1, 2)
ai = invmod(a, p)
c = pow(b, t, p)
r = pow(a, (t + 1) // 2, p)
for i in range(1, pow2):
e = pow(2, pow2 - i - 1, p - 1)
d = pow(pow(r, 2, p) * ai, e, p)
if d == p - 1:
r = (r * c) % p
c = pow(c, 2, p)
return (r, (-r) % p) # both roots
# x**2 == a (mod p**k), p is prime, gcd(a, p) == 1
def sqrtmod_prime_power_for_coprime(a, p, k):
if a == 1:
if p == 2:
if k == 1:
return (1, )
if k == 2:
return (1, 3)
if k == 3:
return (1, 3, 5, 7)
else:
return 1, pow_p - 1
if p == 2: # roots mod 2**k
roots = 1, 3
powind = 3
while powind < k:
next_powind = powind + 1
next_roots = set()
arem = a % powers[next_powind]
for r in roots: # can be done better
if pow(r, 2, powers[next_powind]) == arem:
next_roots.add(r)
r = powers[powind] - r
if pow(r, 2, powers[next_powind]) == arem:
next_roots.add(r)
powind = next_powind
roots = next_roots
roots = [pow_p - r for r in roots] + list(roots)
return roots
else: # p >= 3
r = sqrtmod_prime(a, p)[0] # any root
powind = 1
while powind < k:
next_powind = min(powind * 2, k)
# Represent root: x = +- (r + p**powind * t1)
b = (a - r**2) % powers[next_powind]
b = (b * invmod(2*r, powers[next_powind])) % powers[next_powind]
if b:
if b % powers[powind]:
raise ValueError("No square root for given value")
b //= powers[powind]
b %= powers[powind]
# Represent t1 = t2 * p**powind + b
# Re-represent root:
# x = +- [ (r + p**powind * b) + t2 * p**(powind*2) ]
r += powers[powind] * b
powind = next_powind
# For next round: x = +- (r + t2 * p**next_powind)
return r % pow_p, (-r) % pow_p
return
# x**2 == 0 (mod p**k), p is prime
def sqrt_for_zero(p, k):
roots = [0]
start_k = (k // 2 + 1) if k & 1 else (k // 2)
r = powers[start_k] % pow_p
r0 = r
while True:
if r: # don't duplicate zero
roots.append(r)
r = (r + powers[start_k]) % pow_p
if r == r0:
break
return roots
# main code
if a == 0:
for r in sqrt_for_zero(p, k):
yield r
return
e, a = extract_prime_power(a, p)
if e & 1:
raise ValueError("No square root for %d (mod %d**%d)" % (a, p, k))
p_acc = powers[e >> 1]
sqrt_k = k - e
roots = sqrtmod_prime_power_for_coprime(a, p, sqrt_k)
if sqrt_k == 0:
for r in roots:
yield (r * p_acc) % pow_p
return
all_roots = set()
for r in roots:
r0 = r % pow_p
while True:
root = (r * p_acc) % pow_p
if root not in all_roots:
yield root
all_roots.add(root)
r = (r + powers[sqrt_k]) % pow_p
if r == r0:
break
return
def jacobi(a, n):
"""
Return Jacobi symbol (or Legendre symbol if n is prime)
"""
s = 1
while True:
if n < 1:
raise ValueError("Too small module for Jacobi symbol: " + str(n))
if n & 1 == 0:
raise ValueError("Jacobi is defined only for odd modules")
if n == 1:
return s
a = a % n
if a == 0:
return 0
if a == 1:
return s
if a & 1 == 0:
if n % 8 in (3, 5):
s = -s
a >>= 1
continue
if a % 4 == 3 and n % 4 == 3:
s = -s
a, n = n, a
return
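# Illustrative example (doctest-style comment, not executed): for a prime modulus the
# Jacobi symbol reduces to the Legendre symbol, i.e. +1 for quadratic residues and -1
# for non-residues, which is what has_sqrtmod_prime_power relies on for odd primes.
#
#   >>> jacobi(2, 7)    # 3*3 = 9 = 2 (mod 7), so 2 is a residue
#   1
#   >>> jacobi(3, 7)    # 3 is not a square modulo 7
#   -1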
|
the-stack_0_17990 | from stringstring.ascii_letters import ascii_letters
def stringstring(string):
base = ''
for i in range(6):
for j in string:
base += ascii_letters[i][j]
base += '\n'
string = string.replace(' ', '')
result = ''
counter = 0
for i in base:
if i == '~':
result += string[counter]
counter += 1
if counter == len(string):
counter = 0
else:
result += i
return result
if __name__ == "__main__":
print(stringstring('Hello World!'))
print(stringstring("This is StringString"))
|
the-stack_0_17992 | """
Copyright (c) 2018, Digi International, Inc.
Module released under MIT License.
Module for easy interface with Digi Remote Manager.
Using documentation from "https://www.digi.com/resources/documentation/digidocs/90001437-13/default.htm#reference/r_ws_v1_streams.htm%3FTocPath%3DWeb%2520services%2520reference%7Cv1%252Fstreams%7C_____0"
- Documentation does require an account to access
Use with samples/cellular/remotemanager/rm_sample.py.
"""
import ubinascii
import urequests
class AuthorizationException(Exception):
pass
STREAMS_URI = "https://remotemanager.digi.com/ws/v1/streams/"
class RemoteManagerConnection:
def __init__(self, credentials, auth_scheme="Basic"):
if not credentials:
self.auth = None
else:
self.set_auth(credentials, auth_scheme)
def set_auth(self, credentials, auth_scheme="Basic"):
if auth_scheme == "Basic":
self.auth = "Basic " + ubinascii.b2a_base64(credentials['username'] + ":" + credentials['password']).decode().strip()
elif auth_scheme == "Bearer":
self.auth = "Bearer " + credentials['token']
else:
raise AuthorizationException("Unsupported authorization scheme: " + auth_scheme)
@staticmethod
def check_response_code(response):
if response.status_code not in (200, 201, 204):
raise ConnectionError("Bad HTTP response status code: " + str(response.status_code))
else:
return response
def set_headers(self, headers):
if not self.auth:
raise AuthorizationException("No authorization credentials provided")
headers = dict() if headers is None else headers
headers['Authorization'] = self.auth
return headers
def get_datastreams(self, headers=None):
headers = self.set_headers(headers)
response = urequests.get(STREAMS_URI + "inventory.json", headers=headers)
self.check_response_code(response)
return [stream['id'] for stream in response.json()['list']]
def get_datastream_info(self, stream_id, headers=None):
headers = self.set_headers(headers)
response = urequests.get(STREAMS_URI + "inventory/" + stream_id + ".json", headers=headers)
return self.check_response_code(response)
def update_datastream(self, stream_id, json, headers=None):
headers = self.set_headers(headers)
response = urequests.put(STREAMS_URI + "inventory/" + stream_id, headers=headers, json=json)
return self.check_response_code(response)
def create_datastream(self, json, headers=None):
headers = self.set_headers(headers)
response = urequests.post(STREAMS_URI + "inventory/", headers=headers, json=json)
return self.check_response_code(response)
def delete_datastream(self, stream_id, headers=None):
headers = self.set_headers(headers)
response = urequests.delete(STREAMS_URI + "inventory/" + stream_id, headers=headers)
return self.check_response_code(response)
def add_datapoint(self, stream_id, value, headers=None):
headers = self.set_headers(headers)
response = urequests.post(STREAMS_URI + "history/", headers=headers, json={"stream_id": stream_id, "value": value})
return self.check_response_code(response)
def delete_datapoint(self, stream_id, start_time=None, end_time=None, headers=None):
headers = self.set_headers(headers)
response = urequests.delete(STREAMS_URI + "history/" + stream_id, params={"start_time": start_time, "end_time": end_time}, headers=headers)
return self.check_response_code(response)
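# Illustrative usage sketch (comments only, not executed): the credential values, the
# stream id and the stream descriptor are placeholders; 'username'/'password' are the
# keys set_auth() expects for the default Basic scheme.
#
#   credentials = {'username': 'example_user', 'password': 'example_password'}
#   rm = RemoteManagerConnection(credentials)
#   rm.create_datastream({'id': 'xbee_temperature', 'type': 'FLOAT'})
#   rm.add_datapoint('xbee_temperature', 21.5)
#   print(rm.get_datastreams())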
|
the-stack_0_17993 | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Readercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import ReadercoinTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(ReadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
disconnect_nodes(self.nodes[0], 1)
disconnect_nodes(self.nodes[1], 0)
disconnect_nodes(self.nodes[2], 1)
disconnect_nodes(self.nodes[2], 0)
disconnect_nodes(self.nodes[0], 2)
disconnect_nodes(self.nodes[1], 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
connect_nodes_bi(self.nodes, 0, 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, 1036)
self.nodes[0].disconnect_p2ps()
node1.wait_for_disconnect()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
connect_nodes_bi(self.nodes, 0, 2)
try:
sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
connect_nodes_bi(self.nodes, 1, 2)
# sync must be possible
sync_blocks(self.nodes)
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
connect_nodes_bi(self.nodes, 0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
|
the-stack_0_17996 | #!/usr/bin/python
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import PyKDL
# import rospy
import baxter_interface
from baxter_kdl.kdl_parser import kdl_tree_from_urdf_model
from urdf_parser_py.urdf import URDF
class baxter_kinematics(object):
"""
Baxter Kinematics with PyKDL
"""
def __init__(self, limb):
self._baxter = URDF.from_parameter_server(key='robot_description')
self._kdl_tree = kdl_tree_from_urdf_model(self._baxter)
self._base_link = self._baxter.get_root()
self._tip_link = limb + '_gripper'
self._tip_frame = PyKDL.Frame()
self._arm_chain = self._kdl_tree.getChain(self._base_link,
self._tip_link)
# Baxter Interface Limb Instances
self._limb_interface = baxter_interface.Limb(limb)
self._joint_names = self._limb_interface.joint_names()
self._num_jnts = len(self._joint_names)
# Store joint information for future use
self.get_joint_information()
# KDL Solvers
self._fk_p_kdl = PyKDL.ChainFkSolverPos_recursive(self._arm_chain)
self._fk_v_kdl = PyKDL.ChainFkSolverVel_recursive(self._arm_chain)
self._ik_v_kdl = PyKDL.ChainIkSolverVel_pinv(self._arm_chain)
self._ik_p_kdl = PyKDL.ChainIkSolverPos_NR(self._arm_chain,
self._fk_p_kdl,
self._ik_v_kdl)
self._jac_kdl = PyKDL.ChainJntToJacSolver(self._arm_chain)
self._dyn_kdl = PyKDL.ChainDynParam(self._arm_chain,
PyKDL.Vector.Zero())
def print_robot_description(self):
nf_joints = 0
for j in self._baxter.joints:
if j.type != 'fixed':
nf_joints += 1
print("URDF non-fixed joints: %d;" % nf_joints)
print("URDF total joints: %d" % len(self._baxter.joints))
print("URDF links: %d" % len(self._baxter.links))
print("KDL joints: %d" % self._kdl_tree.getNrOfJoints())
print("KDL segments: %d" % self._kdl_tree.getNrOfSegments())
def print_kdl_chain(self):
for idx in xrange(self._arm_chain.getNrOfSegments()):
print('* ' + self._arm_chain.getSegment(idx).getName())
def get_joint_information(self):
joints = {}
for j in self._baxter.joints:
if j.type != 'fixed':
joints[j.name] = j
self.joint_limits_lower = []
self.joint_limits_upper = []
self.joint_types = []
for jnt_name in self._joint_names:
jnt = joints[jnt_name]
if jnt.limit is not None:
self.joint_limits_lower.append(jnt.limit.lower)
self.joint_limits_upper.append(jnt.limit.upper)
else:
self.joint_limits_lower.append(None)
self.joint_limits_upper.append(None)
self.joint_types.append(jnt.type)
def replace_none(x, v):
if x is None:
return v
return x
self.joint_limits_lower = np.array([replace_none(jl, -np.inf)
for jl in self.joint_limits_lower])
self.joint_limits_upper = np.array([replace_none(jl, np.inf)
for jl in self.joint_limits_upper])
self.joint_types = np.array(self.joint_types)
def joints_to_kdl(self, type, values=None):
kdl_array = PyKDL.JntArray(self._num_jnts)
if values is None:
if type == 'positions':
cur_type_values = self._limb_interface.joint_angles()
elif type == 'velocities':
cur_type_values = self._limb_interface.joint_velocities()
elif type == 'torques':
cur_type_values = self._limb_interface.joint_efforts()
else:
cur_type_values = values
for idx, name in enumerate(self._joint_names):
kdl_array[idx] = cur_type_values[name]
if type == 'velocities':
kdl_array = PyKDL.JntArrayVel(kdl_array)
return kdl_array
def kdl_to_mat(self, data):
mat = np.mat(np.zeros((data.rows(), data.columns())))
for i in range(data.rows()):
for j in range(data.columns()):
mat[i,j] = data[i,j]
return mat
def forward_position_kinematics(self,joint_values=None):
end_frame = PyKDL.Frame()
self._fk_p_kdl.JntToCart(self.joints_to_kdl('positions',joint_values),
end_frame)
pos = end_frame.p
rot = PyKDL.Rotation(end_frame.M)
rot = rot.GetQuaternion()
return np.array([pos[0], pos[1], pos[2],
rot[0], rot[1], rot[2], rot[3]])
def forward_velocity_kinematics(self,joint_velocities=None):
end_frame = PyKDL.FrameVel()
self._fk_v_kdl.JntToCart(self.joints_to_kdl('velocities',joint_velocities),
end_frame)
return end_frame.GetTwist()
def inverse_kinematics(self, position, orientation=None, seed=None, min_joints=None, max_joints=None, maxiter=500, eps=1.0e-6):
ik = PyKDL.ChainIkSolverVel_pinv(self._arm_chain)
pos = PyKDL.Vector(position[0], position[1], position[2])
if orientation is not None:
rot = PyKDL.Rotation()
rot = rot.Quaternion(orientation[0], orientation[1],
orientation[2], orientation[3])
# Populate seed with current angles if not provided
seed_array = PyKDL.JntArray(self._num_jnts)
if seed is not None:
seed_array.resize(len(seed))
for idx, jnt in enumerate(seed):
seed_array[idx] = jnt
else:
seed_array = self.joints_to_kdl('positions')
# Make IK Call
if orientation is not None:
goal_pose = PyKDL.Frame(rot, pos)
else:
goal_pose = PyKDL.Frame(pos)
result_angles = PyKDL.JntArray(self._num_jnts)
# Make IK solver with joint limits
if min_joints is None:
min_joints = self.joint_limits_lower
if max_joints is None:
max_joints = self.joint_limits_upper
mins_kdl = PyKDL.JntArray(len(min_joints))
for idx,jnt in enumerate(min_joints): mins_kdl[idx] = jnt
maxs_kdl = PyKDL.JntArray(len(max_joints))
for idx,jnt in enumerate(max_joints): maxs_kdl[idx] = jnt
ik_p_kdl = PyKDL.ChainIkSolverPos_NR_JL(self._arm_chain, mins_kdl, maxs_kdl,
self._fk_p_kdl, self._ik_v_kdl, maxiter, eps)
if ik_p_kdl.CartToJnt(seed_array, goal_pose, result_angles) >= 0:
result = np.array(list(result_angles))
return result
else:
return None
def jacobian(self,joint_values=None):
jacobian = PyKDL.Jacobian(self._num_jnts)
self._jac_kdl.JntToJac(self.joints_to_kdl('positions',joint_values), jacobian)
return self.kdl_to_mat(jacobian)
def jacobian_transpose(self,joint_values=None):
return self.jacobian(joint_values).T
def jacobian_pseudo_inverse(self,joint_values=None):
return np.linalg.pinv(self.jacobian(joint_values))
def inertia(self,joint_values=None):
inertia = PyKDL.JntSpaceInertiaMatrix(self._num_jnts)
self._dyn_kdl.JntToMass(self.joints_to_kdl('positions',joint_values), inertia)
return self.kdl_to_mat(inertia)
def cart_inertia(self,joint_values=None):
js_inertia = self.inertia(joint_values)
jacobian = self.jacobian(joint_values)
return np.linalg.inv(jacobian * np.linalg.inv(js_inertia) * jacobian.T)
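# Illustrative usage sketch (comments only, not executed): assumes a ROS node has
# already been initialised and the Baxter robot_description is on the parameter
# server; the limb name 'right' is a placeholder.
#
#   kin = baxter_kinematics('right')
#   kin.print_robot_description()
#   pose = kin.forward_position_kinematics()              # [x, y, z, qx, qy, qz, qw]
#   joints = kin.inverse_kinematics(pose[:3], pose[3:])   # None if IK fails
#   J = kin.jacobian()                                     # 6 x n_joints Jacobian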
|
the-stack_0_17997 | # Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from . import schemes
from . import transport as t
from .status import OK
__all__ = ['Response']
class Response(object):
"""A TChannel response.
This is sent by handlers and received by callers.
:ivar body:
The payload of this response. The type of this attribute depends on the
scheme being used (e.g., JSON, Thrift, etc.).
:ivar headers:
A dictionary of application headers. This should be a mapping of
strings to strings.
:ivar transport:
Protocol-level transport headers. These are used for routing over
Hyperbahn.
"""
# TODO implement __repr__
__slots__ = (
'body',
'status',
'headers',
'transport',
)
def __init__(self, body=None, headers=None, transport=None, status=None):
if status is None:
status = OK
self.body = body
self.status = status
self.headers = headers
self.transport = transport
class TransportHeaders(object):
"""Response-specific Transport Headers"""
# TODO implement __repr__
__slots__ = (
'failure_domain',
'scheme',
)
def __init__(self, failure_domain=None, scheme=None):
if scheme is None:
scheme = schemes.RAW
self.failure_domain = failure_domain
self.scheme = scheme
@classmethod
def from_dict(cls, data):
return cls(
failure_domain=data.get(t.FAILURE_DOMAIN),
scheme=data.get(t.SCHEME),
)
def to_dict(self):
m = {}
if self.failure_domain is not None:
m[t.FAILURE_DOMAIN] = self.failure_domain
if self.scheme is not None:
m[t.SCHEME] = self.scheme
return m
def response_from_mixed(mixed):
"""Create Response from mixed input."""
# if none then give empty Response
if mixed is None:
return Response()
# if not Response, then treat like body
if not isinstance(mixed, Response):
return Response(mixed)
# it's already a Response
return mixed
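# Illustrative example (doctest-style comment, not executed): handlers may return
# either a plain body or a full Response; both normalise to a Response object.
#
#   >>> response_from_mixed({'ok': True}).body
#   {'ok': True}
#   >>> response_from_mixed(Response(body='hi', status=OK)).status == OK
#   True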
|
the-stack_0_17999 | # -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2017, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ````AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module provides a simple python wrapper over the OVH REST API.
It handles requesting credential, signing queries...
- To get your API keys: https://eu.api.ovh.com/createApp/
- To get started with API: https://api.ovh.com/g934.first_step_with_api
"""
import hashlib
import urllib
import keyword
import time
import json
try:
from urllib import urlencode
except ImportError: # pragma: no cover
# Python 3
from urllib.parse import urlencode
from .vendor.requests import request, Session
from .vendor.requests.packages import urllib3
from .vendor.requests.exceptions import RequestException
# Disable pyopenssl. It breaks SSL connection pool when SSL connection is
# closed unexpetedly by the server. And we don't need SNI anyway.
try:
from .vendor.requests.packages.urllib3.contrib import pyopenssl
pyopenssl.extract_from_urllib3()
except ImportError:
pass
# Disable SNI related Warning. The API does not rely on it
urllib3.disable_warnings(urllib3.exceptions.SNIMissingWarning)
urllib3.disable_warnings(urllib3.exceptions.SecurityWarning)
from .config import config
from .consumer_key import ConsumerKeyRequest
from .exceptions import (
APIError, NetworkError, InvalidResponse, InvalidRegion, InvalidKey,
ResourceNotFoundError, BadParametersError, ResourceConflictError, HTTPError,
NotGrantedCall, NotCredential, Forbidden, InvalidCredential,
)
#: Mapping between OVH API region names and corresponding endpoints
ENDPOINTS = {
'ovh-eu': 'https://eu.api.ovh.com/1.0',
'ovh-ca': 'https://ca.api.ovh.com/1.0',
'kimsufi-eu': 'https://eu.api.kimsufi.com/1.0',
'kimsufi-ca': 'https://ca.api.kimsufi.com/1.0',
'soyoustart-eu': 'https://eu.api.soyoustart.com/1.0',
'soyoustart-ca': 'https://ca.api.soyoustart.com/1.0',
}
#: Default timeout for each request. 180 seconds connect, 180 seconds read.
TIMEOUT = 180
class Client(object):
"""
Low level OVH Client. It abstracts all the authentication and request
signing logic along with some nice tools helping with key generation.
All low level request logic including signing and error handling takes place
in :py:func:`Client.call` function. Convenient wrappers
:py:func:`Client.get` :py:func:`Client.post`, :py:func:`Client.put`,
:py:func:`Client.delete` should be used instead. :py:func:`Client.post`,
:py:func:`Client.put` both accept arbitrary list of keyword arguments
mapped to ``data`` param of :py:func:`Client.call`.
Example usage:
.. code:: python
from ovh import Client, APIError
REGION = 'ovh-eu'
APP_KEY="<application key>"
APP_SECRET="<application secret key>"
CONSUMER_KEY="<consumer key>"
client = Client(REGION, APP_KEY, APP_SECRET, CONSUMER_KEY)
try:
print client.get('/me')
except APIError as e:
print "Ooops, failed to get my info:", e.msg
"""
def __init__(self, endpoint=None, application_key=None,
application_secret=None, consumer_key=None, timeout=TIMEOUT,
config_file=None):
"""
Creates a new Client. No credential check is done at this point.
The ``application_key`` identifies your application while
``application_secret`` authenticates it. On the other hand, the
``consumer_key`` uniquely identifies your application's end user without
requiring his personal password.
If any of ``endpoint``, ``application_key``, ``application_secret``
or ``consumer_key`` is not provided, this client will attempt to locate
from them from environment, ~/.ovh.cfg or /etc/ovh.cfg.
See :py:mod:`ovh.config` for more informations on supported
configuration mechanisms.
``timeout`` can either be a float or a tuple. If it is a float it
sets the same timeout for both connection and read. If it is a tuple
connection and read timeout will be set independently. To use the
latter approach you need at least requests v2.4.0. Default value is
180 seconds for connection and 180 seconds for read.
:param str endpoint: API endpoint to use. Valid values in ``ENDPOINTS``
:param str application_key: Application key as provided by OVH
:param str application_secret: Application secret key as provided by OVH
        :param str consumer_key: uniquely identifies the application's end user
:param tuple timeout: Connection and read timeout for each request
:param float timeout: Same timeout for both connection and read
:raises InvalidRegion: if ``endpoint`` can't be found in ``ENDPOINTS``.
"""
# Load a custom config file if requested
if config_file is not None:
config.read(config_file)
# load endpoint
if endpoint is None:
endpoint = config.get('default', 'endpoint')
try:
self._endpoint = ENDPOINTS[endpoint]
except KeyError:
            raise InvalidRegion("Unknown endpoint %s. Valid endpoints: %s"
                                % (endpoint, list(ENDPOINTS.keys())))
# load keys
if application_key is None:
application_key = config.get(endpoint, 'application_key')
self._application_key = application_key
if application_secret is None:
application_secret = config.get(endpoint, 'application_secret')
self._application_secret = application_secret
if consumer_key is None:
consumer_key = config.get(endpoint, 'consumer_key')
self._consumer_key = consumer_key
# lazy load time delta
self._time_delta = None
# use a requests session to reuse HTTPS connections between requests
self._session = Session()
# Override default timeout
self._timeout = timeout
## high level API
@property
def time_delta(self):
"""
Request signatures are valid only for a short amount of time to mitigate
risk of attack replay scenarii which requires to use a common time
reference. This function queries endpoint's time and computes the delta.
This entrypoint does not require authentication.
This method is *lazy*. It will only load it once even though it is used
for each request.
.. note:: You should not need to use this property directly
:returns: time distance between local and server time in seconds.
:rtype: int
"""
if self._time_delta is None:
server_time = self.get('/auth/time', _need_auth=False)
self._time_delta = server_time - int(time.time())
return self._time_delta
def new_consumer_key_request(self):
"""
Create a new consumer key request. This is the recommended way to create
a new consumer key request.
Full example:
>>> import ovh
>>> client = ovh.Client("ovh-eu")
>>> ck = client.new_consumer_key_request()
>>> ck.add_rules(ovh.API_READ_ONLY, "/me")
>>> ck.add_recursive_rules(ovh.API_READ_WRITE, "/sms")
>>> ck.request()
{
'state': 'pendingValidation',
'consumerKey': 'TnpZAd5pYNqxk4RhlPiSRfJ4WrkmII2i',
'validationUrl': 'https://eu.api.ovh.com/auth/?credentialToken=now2OOAVO4Wp6t7bemyN9DMWIobhGjFNZSHmixtVJM4S7mzjkN2L5VBfG96Iy1i0'
}
"""
return ConsumerKeyRequest(self)
def request_consumerkey(self, access_rules, redirect_url=None):
"""
Create a new "consumer key" identifying this application's end user. API
will return a ``consumerKey`` and a ``validationUrl``. The end user must
visit the ``validationUrl``, authenticate and validate the requested
``access_rules`` to link his account to the ``consumerKey``. Once this
        is done, he may optionally be redirected to ``redirect_url`` and the
application can start using the ``consumerKey``.
The new ``consumerKey`` is automatically loaded into
``self._consumer_key`` and is ready to used as soon as validated.
As signing requires a valid ``consumerKey``, the method does not require
authentication, only a valid ``applicationKey``
``access_rules`` is a list of the form:
.. code:: python
# Grant full, unrestricted API access
access_rules = [
{'method': 'GET', 'path': '/*'},
{'method': 'POST', 'path': '/*'},
{'method': 'PUT', 'path': '/*'},
{'method': 'DELETE', 'path': '/*'}
]
To request a new consumer key, you may use a code like:
.. code:: python
# Request RO, /me API access
access_rules = [
{'method': 'GET', 'path': '/me'},
]
# Request token
validation = client.request_consumerkey(access_rules)
print "Please visit", validation['validationUrl'], "to authenticate"
raw_input("and press Enter to continue...")
# Print nice welcome message
print "Welcome", client.get('/me')['firstname']
:param list access_rules: Mapping specifying requested privileges.
:param str redirect_url: Where to redirect end user upon validation.
:raises APIError: When ``self.call`` fails.
:returns: dict with ``consumerKey`` and ``validationUrl`` keys
:rtype: dict
"""
res = self.post('/auth/credential', _need_auth=False,
accessRules=access_rules, redirection=redirect_url)
self._consumer_key = res['consumerKey']
return res
## API shortcuts
def _canonicalize_kwargs(self, kwargs):
"""
If an API needs an argument colliding with a Python reserved keyword, it
can be prefixed with an underscore. For example, ``from`` argument of
``POST /email/domain/{domain}/redirection`` may be replaced by ``_from``
:param dict kwargs: input kwargs
        :return dict: filtered kwargs
"""
arguments = {}
for k, v in kwargs.items():
if k[0] == '_' and k[1:] in keyword.kwlist:
k = k[1:]
arguments[k] = v
return arguments
def _prepare_query_string(self, kwargs):
"""
Boolean needs to be send as lowercase 'false' or 'true' in querystring.
This function prepares arguments for querystring and encodes them.
:param dict kwargs: input kwargs
:return string: prepared querystring
"""
arguments = {}
for k, v in kwargs.items():
if isinstance(v, bool):
v = str(v).lower()
arguments[k] = v
return urlencode(arguments)
def get(self, _target, _need_auth=True, **kwargs):
"""
'GET' :py:func:`Client.call` wrapper.
Query string parameters can be set either directly in ``_target`` or as
        keyword arguments. If an argument collides with a Python reserved
keyword, prefix it with a '_'. For instance, ``from`` becomes ``_from``.
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
if kwargs:
kwargs = self._canonicalize_kwargs(kwargs)
query_string = self._prepare_query_string(kwargs)
if '?' in _target:
_target = '%s&%s' % (_target, query_string)
else:
_target = '%s?%s' % (_target, query_string)
return self.call('GET', _target, None, _need_auth)
def put(self, _target, _need_auth=True, **kwargs):
"""
'PUT' :py:func:`Client.call` wrapper
        Body parameters can be set either directly in ``_target`` or as keyword
arguments. If an argument collides with a Python reserved keyword,
prefix it with a '_'. For instance, ``from`` becomes ``_from``.
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
kwargs = self._canonicalize_kwargs(kwargs)
return self.call('PUT', _target, kwargs, _need_auth)
def post(self, _target, _need_auth=True, **kwargs):
"""
'POST' :py:func:`Client.call` wrapper
        Body parameters can be set either directly in ``_target`` or as keyword
arguments. If an argument collides with a Python reserved keyword,
prefix it with a '_'. For instance, ``from`` becomes ``_from``.
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
kwargs = self._canonicalize_kwargs(kwargs)
return self.call('POST', _target, kwargs, _need_auth)
def delete(self, _target, _need_auth=True):
"""
'DELETE' :py:func:`Client.call` wrapper
:param string _target: API method to call
:param string _need_auth: If True, send authentication headers. This is
the default
"""
return self.call('DELETE', _target, None, _need_auth)
## low level helpers
def call(self, method, path, data=None, need_auth=True):
"""
Low level call helper. If ``consumer_key`` is not ``None``, inject
authentication headers and sign the request.
Request signature is a sha1 hash on following fields, joined by '+'
- application_secret
- consumer_key
- METHOD
- full request url
- body
- server current time (takes time delta into account)
:param str method: HTTP verb. Usually one of GET, POST, PUT, DELETE
:param str path: api entrypoint to call, relative to endpoint base path
:param data: any json serializable data to send as request's body
:param boolean need_auth: if False, bypass signature
:raises HTTPError: when underlying request failed for network reason
:raises InvalidResponse: when API response could not be decoded
"""
# attempt request
try:
result = self.raw_call(method=method, path=path, data=data, need_auth=need_auth)
except RequestException as error:
raise HTTPError("Low HTTP request failed error", error)
status = result.status_code
# attempt to decode and return the response
try:
json_result = result.json()
except ValueError as error:
raise InvalidResponse("Failed to decode API response", error)
# error check
if status >= 100 and status < 300:
return json_result
elif status == 403 and json_result.get('errorCode') == 'NOT_GRANTED_CALL':
raise NotGrantedCall(json_result.get('message'),
response=result)
elif status == 403 and json_result.get('errorCode') == 'NOT_CREDENTIAL':
raise NotCredential(json_result.get('message'),
response=result)
elif status == 403 and json_result.get('errorCode') == 'INVALID_KEY':
raise InvalidKey(json_result.get('message'), response=result)
elif status == 403 and json_result.get('errorCode') == 'INVALID_CREDENTIAL':
raise InvalidCredential(json_result.get('message'),
response=result)
elif status == 403 and json_result.get('errorCode') == 'FORBIDDEN':
raise Forbidden(json_result.get('message'), response=result)
elif status == 404:
raise ResourceNotFoundError(json_result.get('message'),
response=result)
elif status == 400:
raise BadParametersError(json_result.get('message'),
response=result)
elif status == 409:
raise ResourceConflictError(json_result.get('message'),
response=result)
elif status == 0:
raise NetworkError()
else:
raise APIError(json_result.get('message'), response=result)
def raw_call(self, method, path, data=None, need_auth=True):
"""
Lowest level call helper. If ``consumer_key`` is not ``None``, inject
authentication headers and sign the request.
Will return a vendored ``requests.Response`` object or let any
``requests`` exception pass through.
Request signature is a sha1 hash on following fields, joined by '+'
- application_secret
- consumer_key
- METHOD
- full request url
- body
- server current time (takes time delta into account)
:param str method: HTTP verb. Usually one of GET, POST, PUT, DELETE
:param str path: api entrypoint to call, relative to endpoint base path
:param data: any json serializable data to send as request's body
:param boolean need_auth: if False, bypass signature
"""
body = ''
target = self._endpoint + path
headers = {
'X-Ovh-Application': self._application_key
}
# include payload
if data is not None:
headers['Content-type'] = 'application/json'
body = json.dumps(data)
        # sign request. Never sign 'time' or it will recurse infinitely
if need_auth:
if not self._application_secret:
raise InvalidKey("Invalid ApplicationSecret '%s'" %
self._application_secret)
if not self._consumer_key:
raise InvalidKey("Invalid ConsumerKey '%s'" %
self._consumer_key)
now = str(int(time.time()) + self.time_delta)
signature = hashlib.sha1()
signature.update("+".join([
self._application_secret, self._consumer_key,
method.upper(), target,
body,
now
]).encode('utf-8'))
headers['X-Ovh-Consumer'] = self._consumer_key
headers['X-Ovh-Timestamp'] = now
headers['X-Ovh-Signature'] = "$1$" + signature.hexdigest()
return self._session.request(method, target, headers=headers,
data=body, timeout=self._timeout)
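# Illustrative sketch of the signature scheme implemented in raw_call() above
# (comments only, not executed; every key, URL and timestamp below is a placeholder):
#
#   import hashlib
#   pre_hash = "+".join([
#       "my_application_secret",            # application_secret
#       "my_consumer_key",                  # consumer_key
#       "GET",                              # HTTP method, upper-cased
#       "https://eu.api.ovh.com/1.0/me",    # full request URL
#       "",                                 # body (empty for a GET)
#       "1457018875",                       # server time = local time + time_delta
#   ])
#   signature = "$1$" + hashlib.sha1(pre_hash.encode("utf-8")).hexdigest()
#   # sent as the X-Ovh-Signature header alongside X-Ovh-Consumer and X-Ovh-Timestamp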
|
the-stack_0_18005 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_wine
from pdb import set_trace as breakpoint
from IPython.display import display
class My_Data_Splitter():
def __init__(self, df, features, target):
self.df = df
self.features = features
self.target = target
self.X = df[features]
self.y = df[target]
def train_validation_test_split(self,
train_size=0.7, val_size=0.1,
test_size=0.2, random_state=None,
shuffle=True):
"""
This function is a utility wrapper around the Scikit-Learn train_test_split that splits arrays or
matrices into train, validation, and test subsets.
Args:
X (Numpy array or DataFrame): This is a dataframe with features.
y (Numpy array or DataFrame): This is a pandas Series with target.
train_size (float or int): Proportion of the dataset to include in the train split (0 to 1).
val_size (float or int): Proportion of the dataset to include in the validation split (0 to 1).
test_size (float or int): Proportion of the dataset to include in the test split (0 to 1).
random_state (int): Controls the shuffling applied to the data before applying the split for reproducibility.
shuffle (bool): Whether or not to shuffle the data before splitting
Returns:
Train, test, and validation dataframes for features (X) and target (y).
"""
X_train_val, X_test, y_train_val, y_test = train_test_split(
self.X, self.y, test_size=test_size, random_state=random_state, shuffle=shuffle)
X_train, X_val, y_train, y_val = train_test_split(
X_train_val, y_train_val, test_size=val_size / (train_size + val_size),
random_state=random_state, shuffle=shuffle)
return X_train, X_val, X_test, y_train, y_val, y_test
def print_split_summary(self, X_train, X_val, X_test):
print('######################## TRAINING DATA ########################')
print(f'X_train Shape: {X_train.shape}')
display(X_train.describe(include='all').transpose())
print('')
print('######################## VALIDATION DATA ######################')
print(f'X_val Shape: {X_val.shape}')
display(X_val.describe(include='all').transpose())
print('')
print('######################## TEST DATA ############################')
print(f'X_test Shape: {X_test.shape}')
display(X_test.describe(include='all').transpose())
print('')
def tvt_split(df, tvt_stratify='target', tvt_train_size=0.70,
tvt_val_size=0.15, tvt_test_size=0.15, tvt_random_state=42):
'''This function uses train test split and calculates an extra split for
your validation set.
It also stratifies and splits everything into X and y sets.
example:
    X_train, y_train, X_val, y_val, X_test, y_test = tvt_split(
df,
tvt_stratify='target_column'
)
'''
tvt_df = df.copy()
tvt_temp_size = tvt_val_size + tvt_test_size
train, temp = train_test_split(tvt_df, train_size=tvt_train_size,
test_size=tvt_temp_size,
stratify=tvt_df[tvt_stratify],
random_state=tvt_random_state)
tvt_val_size_adjusted = tvt_val_size / tvt_temp_size
tvt_test_size_adjusted = tvt_test_size / tvt_temp_size
val, test = train_test_split(temp, train_size=tvt_val_size_adjusted,
test_size=tvt_test_size_adjusted,
stratify=temp[tvt_stratify],
random_state=tvt_random_state)
X_train = train.drop(tvt_stratify, axis=1)
y_train = train[tvt_stratify]
X_val = val.drop(tvt_stratify, axis=1)
y_val = val[tvt_stratify]
X_test = test.drop(tvt_stratify, axis=1)
y_test = test[tvt_stratify]
return X_train, y_train, X_val, y_val, X_test, y_test
def train_validation_test_split(X, y, train_size=0.7, val_size=0.1,
test_size=0.2, random_state=None,
shuffle=True):
X_train_val, X_test, y_train_val, y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state, shuffle=shuffle)
X_train, X_val, y_train, y_val = train_test_split(
X_train_val, y_train_val, test_size=val_size/(train_size+val_size),
random_state=random_state, shuffle=shuffle)
return X_train, X_val, X_test, y_train, y_val, y_test
if __name__ == '__main__':
raw_data = load_wine()
df = pd.DataFrame(data=raw_data['data'], columns=raw_data['feature_names'])
df['target'] = raw_data['target']
# breakpoint()
# X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(
# df[['ash', 'hue']], df['target'])
#
# Test the My_Data_Splitter class
splitter = My_Data_Splitter(df=df, features=['ash', 'hue'], target='target')
    X_train, X_val, X_test, y_train, y_val, y_test = splitter.train_validation_test_split()
splitter.print_split_summary(X_train, X_val, X_test)
|
the-stack_0_18006 | import cv2 as cv
import numpy as np
# Set up training data
## [setup1]
labels = np.array([1, -1, -1, -1])
trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)
## [setup1]
# Train the SVM
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
## [init]
## [train]
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)
## [train]
# Data for visual representation
width = 512
height = 512
image = np.zeros((height, width, 3), dtype=np.uint8)
# Show the decision regions given by the SVM
## [show]
green = (0,255,0)
blue = (255,0,0)
for i in range(image.shape[0]):
for j in range(image.shape[1]):
sampleMat = np.matrix([[j,i]], dtype=np.float32)
response = svm.predict(sampleMat)[1]
if response == 1:
image[i,j] = green
elif response == -1:
image[i,j] = blue
## [show]
# Show the training data
## [show_data]
thickness = -1
cv.circle(image, (501, 10), 5, ( 0, 0, 0), thickness)
cv.circle(image, (255, 10), 5, (255, 255, 255), thickness)
cv.circle(image, (501, 255), 5, (255, 255, 255), thickness)
cv.circle(image, ( 10, 501), 5, (255, 255, 255), thickness)
## [show_data]
# Show support vectors
## [show_vectors]
thickness = 2
sv = svm.getUncompressedSupportVectors()
for i in range(sv.shape[0]):
cv.circle(image, (sv[i,0], sv[i,1]), 6, (128, 128, 128), thickness)
## [show_vectors]
cv.imwrite('result.png', image) # save the image
cv.imshow('SVM Simple Example', image) # show it to the user
cv.waitKey()
|
the-stack_0_18007 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowBaremetalServerTagsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[BaremetalServerTag]'
}
attribute_map = {
'tags': 'tags'
}
def __init__(self, tags=None):
"""ShowBaremetalServerTagsResponse - a model defined in huaweicloud sdk"""
super(ShowBaremetalServerTagsResponse, self).__init__()
self._tags = None
self.discriminator = None
if tags is not None:
self.tags = tags
@property
def tags(self):
"""Gets the tags of this ShowBaremetalServerTagsResponse.
:return: The tags of this ShowBaremetalServerTagsResponse.
:rtype: list[BaremetalServerTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ShowBaremetalServerTagsResponse.
:param tags: The tags of this ShowBaremetalServerTagsResponse.
:type: list[BaremetalServerTag]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowBaremetalServerTagsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_18009 | from sqlalchemy import *
from migrate import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relation
TableBase = declarative_base()
# Stub tables
class Role(TableBase):
__tablename__ = 'roles'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(127), nullable=False)
# Modified tables
class User(TableBase):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, nullable=False)
role_id = Column(Integer, ForeignKey('roles.id'), nullable=False)
class Log(TableBase):
__tablename__ = 'logs'
id = Column(Integer, primary_key=True)
privileges = Column(Unicode)
# New tables
class UserRole(TableBase):
__tablename__ = 'user_roles'
user_id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=False)
role_id = Column(Integer, ForeignKey('roles.id'), primary_key=True, nullable=False)
# Deleted tables
class Privilege(TableBase):
__tablename__ = 'privileges'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(127), nullable=False)
description = Column(Unicode, nullable=True)
class RolePrivilege(TableBase):
__tablename__ = 'role_privileges'
role_id = Column(Integer, ForeignKey('roles.id'), primary_key=True, nullable=False)
priv_id = Column(Integer, ForeignKey('privileges.id'), primary_key=True, nullable=False)
class LogPrivilege(TableBase):
__tablename__ = 'log_privileges'
log_id = Column(Integer, ForeignKey('logs.id'), primary_key=True, nullable=False)
priv_id = Column(Integer, ForeignKey('privileges.id'), primary_key=True, nullable=False)
Role.privileges = relation(Privilege, secondary=RolePrivilege.__table__)
User.role = relation(Role, innerjoin=True, backref='users')
User.roles = relation(Role, UserRole.__table__)
def upgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
LogPrivilege.__table__.drop()
RolePrivilege.__table__.drop()
Privilege.__table__.drop()
UserRole.__table__.create()
Log.__table__.c.privileges.create()
Session = sessionmaker(bind=migrate_engine)()
user_role = Session.query(Role).filter_by(name=u'user').one()
admin_role = Session.query(Role).filter_by(name=u'admin').one()
for user in Session.query(User).all():
user.roles.append(user_role)
for user in Session.query(User).filter_by(role=admin_role).all():
user.roles.append(admin_role)
Session.commit()
User.__table__.c.role_id.drop()
def downgrade(migrate_engine):
# XXX will drop everyone to just the basic user role
TableBase.metadata.bind = migrate_engine
Session = sessionmaker(bind=migrate_engine)()
base_user_id, = Session.query(Role.id).filter_by(name=u'user').one()
Session.rollback()
role_id = User.__table__.c.role_id
role_id.server_default = DefaultClause(str(base_user_id))
role_id.create()
role_id.alter(server_default=None)
Log.__table__.c.privileges.drop()
UserRole.__table__.drop()
Privilege.__table__.create()
RolePrivilege.__table__.create()
LogPrivilege.__table__.create()
# Add canonical privileges
privileges = [
Privilege(name=name, description=description)
for name, description in [
(u'auth.certificates', u'Can manage own client certificates'),
(u'auth.method', u'Can manage own authentication method'),
(u'auth.openid', u'Can manage own OpenID URLs'),
(u'art.upload', u'Can upload art'),
(u'art.rate', u'Can rate art'),
(u'comments.add', u'Can post comments'),
(u'tags.add', u'Can add tags with no restrictions'),
(u'tags.remove', u'Can remove tags with no restrictions'),
(u'admin.view', u'Can view administrative tools/panel'),
]
]
user_role = Session.query(Role).filter_by(name=u'user').one()
admin_role = Session.query(Role).filter_by(name=u'admin').one()
for priv in privileges:
admin_role.privileges.append(priv)
if not priv.name.startswith('admin.'):
user_role.privileges.append(priv)
Session.commit()
|
the-stack_0_18010 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
import src._init_paths
import os
import torch
import torch.utils.data
from src.lib.opts import opts
from src.lib.models.model import create_model, load_model, save_model
from src.lib.models.data_parallel import DataParallel
from src.lib.logger import Logger
from src.lib.datasets.dataset_factory import get_dataset
from src.lib.trains.train_factory import train_factory
from src.lib.datasets.build import build_dataset
def main(opt):
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
# torch.backends.cudnn.enabled = False
Dataset = get_dataset(opt.dataset, opt.task)
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
logger = Logger(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv, opt.region_num_visual,
opt.region_num_temporal, opt.vote_field_size)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
start_epoch = 0
if opt.load_model != '':
model, optimizer, start_epoch = load_model(
model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
Trainer = train_factory[opt.task]
trainer = Trainer(opt, model, optimizer)
trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
datasets = build_dataset(Dataset, opt, is_train=True)
print('Setting up data...')
val_loader = torch.utils.data.DataLoader(
datasets,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True
)
if opt.test:
_, preds = trainer.val(0, val_loader)
val_loader.dataset.run_eval(preds, opt.save_dir)
return
train_loader = torch.utils.data.DataLoader(
datasets,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
drop_last=True
)
print('Starting training...')
for epoch in range(start_epoch + 1, opt.num_epochs + 1):
mark = epoch if opt.save_all else 'last'
log_dict_train, _ = trainer.train(epoch, train_loader)
logger.write('epoch: {} |'.format(epoch))
for k, v in log_dict_train.items():
logger.scalar_summary('train_{}'.format(k), v, epoch)
logger.write('{} {:8f} | '.format(k, v))
if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
epoch, model, optimizer)
else:
save_model(os.path.join(opt.save_dir, 'model_last.pth'),
epoch, model, optimizer)
logger.write('\n')
if epoch in opt.lr_step:
save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
epoch, model, optimizer)
lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
print('Drop LR to', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
logger.close()
if __name__ == '__main__':
opt = opts().parse()
main(opt) |
the-stack_0_18011 | import torch
from torch.nn import functional as F
from scvi.dataset.data_loaders import TrainTestDataLoaders
from scvi.metrics.classification import compute_accuracy
from . import Inference
class ClassifierInference(Inference):
r"""The ClassifierInference class for training a classifier either on the raw data or on top of the latent
space of another model (VAE, VAEC, SVAEC).
Args:
:model: A model instance from class ``VAE``, ``VAEC``, ``SVAEC``
:gene_dataset: A gene_dataset instance like ``CortexDataset()``
    :train_size: The train size, either a float between 0 and 1 or an integer for the number of training samples
        to use. Default: ``0.8``.
    :\**kwargs: Other keyword arguments from the general Inference class.
infer_cls = ClassifierInference(cls, cortex_dataset)
infer_cls.train(n_epochs=1)
infer_cls.accuracy('train')
Examples:
>>> gene_dataset = CortexDataset()
>>> vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=gene_dataset.n_labels)
>>> cls = Classifier(vae.n_latent, n_labels=cortex_dataset.n_labels)
>>> infer = ClassifierInference(gene_dataset, sampling_model=vae, train_size=0.5)
>>> infer.train(n_epochs=20, lr=1e-3)
>>> infer.accuracy('test')
>>> cls = Classifier(gene_dataset.nb_genes, n_labels=cortex_dataset.n_labels)
>>> infer = ClassifierInference(gene_dataset, train_size=0.5)
>>> infer.train(n_epochs=20, lr=1e-3)
>>> infer.accuracy('test')
"""
default_metrics_to_monitor = ['accuracy']
def __init__(self, *args, sampling_model=None, use_cuda=True, **kwargs):
self.sampling_model = sampling_model
super(ClassifierInference, self).__init__(*args, use_cuda=use_cuda, **kwargs)
if 'data_loaders' not in kwargs:
self.data_loaders = TrainTestDataLoaders(self.gene_dataset, train_size=0.1)
def train(self, *args, **kargs):
if hasattr(self.model, "update_parameters"):
with torch.no_grad():
self.model.update_parameters(self.sampling_model, self.data_loaders['train'])
else:
super(ClassifierInference, self).train(*args, **kargs)
def loss(self, tensors_labelled):
x, _, _, _, labels_train = tensors_labelled
x = self.sampling_model.sample_from_posterior_z(x) if self.sampling_model is not None else x
return F.cross_entropy(self.model(x), labels_train.view(-1))
def accuracy(self, name, verbose=False):
model, cls = (self.sampling_model, self.model) if hasattr(self, 'sampling_model') else (self.model, None)
acc = compute_accuracy(model, self.data_loaders[name], classifier=cls)
if verbose:
print("Acc for %s is : %.4f" % (name, acc))
return acc
accuracy.mode = 'max'
|
the-stack_0_18014 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import shutil
import json
from random import randint
from time import sleep
from fake_useragent import UserAgent
from selenium.webdriver.chrome.options import Options
# if chromedriver is not added to the PATH, uncomment the below line
# webdriver.Chrome(executable_path="./driver/")
options = webdriver.ChromeOptions()
# # start chrome browser
# browser = ""
# soup = ""
story = {}
story["name"] = ""
story["author"] = ""
story["text"] = []
story["img_src"] = ""
def get_story_name():
global soup
_story_name_section = soup.find(
"div", class_="story-title-info storytitleInfo-m__wrapper__1edlu"
)
_name = _story_name_section.find_all("div")[2].text
story_name = ""
if len(_name) != 0:
story_name = _name
return story_name # got the right story name
else:
raise Exception("Error: no story name found")
def get_story_author():
global soup
_story_author_section = soup.find("div", class_="author-name-location-wrapper")
_story_author = _story_author_section.find_all("span")[0].text
if _story_author == "" or _story_author == "লেখা":
# apply second method, may be left-aligned story
try:
_story_author = _story_author_section.find_all("span")[1].text
except:
# author having clickble link
_story_author = _story_author_section.find_all("a")[0].text
author = ""
if len(_story_author) != 0:
author = _story_author
return author # got the right author name
else:
raise Exception("Error: author name not found")
def get_main_image():
global soup
_img_src = ""
_img_section = ""
try:
_img_section = soup.find(
"div", class_="story-card-m__wrapper__ounrk story-card-m__bn-wrapper__OgEBK"
)
_img_src = _img_section.find_all("img")[0]["src"]
_img_src = _img_src.split("?")[0]
except:
try:
# left aligned stories
_img_section = soup.find_all(
"div",
class_="story-card-m__wrapper__ounrk story-card-m__left-align__2JTUo story-card-m__bn-wrapper__OgEBK",
)[0]
_img_src = _img_section.find_all("img")[0]["src"]
_img_src = _img_src.split("?")[0]
except:
# new card type images > oct, 2020
try:
_img_section = soup.find_all(
"div",
class_="card-image-wrapper cardImage-m__card-image-wrapper__2Ozvn",
)[0]
_img_src = _img_section.find_all("img")[0]["src"]
_img_src = _img_src.split("?")[0]
except:
try:
# even older version
_img_section = soup.find_all(
"div",
class_="story-card-m__wrapper__ounrk story-card-m__bn-wrapper__OgEBK story-card-m__left-align__2JTUo",
)[0]
_img_src = _img_section.find_all("img")[0]["src"]
_img_src = _img_src.split("?")[0]
except:
print("Warning: no method worked for finding the image src")
return ""
main_image_url = ""
if len(_img_src) != 0:
main_image_url = _img_src
return main_image_url
else:
raise Exception("Error: in finding image src")
def get_story_text():
global soup
_story_text_section = soup.find("div", class_="story-content no-key-elements")
_text_divs = _story_text_section.find_all(
"div", class_="story-element story-element-text"
)
lines = []
for _div in _text_divs:
p = _div.find_all("p")
# writing all the story lines
for _p in p:
lines.append(_p.text)
if len(lines) != 0:
return lines
else:
raise Exception("Error: no story text found")
def get_other_images():
# other small image's url
pass
def write_image(file_name):
global story
url = story["img_src"]
response = requests.get(url, stream=True)
with open(f"./stories/images/prothomalo/{file_name}.jpg", "wb") as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
def make_story():
global story
story_name = story["name"]
story_name = story_name.strip().replace(" ", "-")
author_name = story["author"]
author_name = author_name.strip().replace(" ", "-")
file_name = f"{story_name}@{author_name}"
print(f"making story: {file_name}")
f = open(f"./stories/prothomalo/{file_name}.md", "w")
f.write("<div align=center>")
# if the story has a valid image src write it
if story["img_src"] != "":
write_image(file_name)
f.write(
f" <img align=center src='../images/prothomalo/{file_name}.jpg' width=500px >\n\n"
)
f.write(f"<h2 align=center>{story['name']}</h4>")
f.write(f"<h3 align=center>{story['author']}</h3>\n</div>\n\n")
for line in story["text"]:
f.write(f"{line}\n\n")
f.close()
print("completed :)")
def get_story():
global story
story["name"] = ""
story["author"] = ""
story["text"] = ""
story["img_src"] = ""
story["name"] = get_story_name()
story["author"] = get_story_author()
story["text"] = get_story_text()
story["img_src"] = get_main_image()
make_story()
# get_story()
# print(get_main_image())
# print(get_story_text())
# print(get_story_author())
# getting the url to scrape
input_file = open('./urls','r')
urls = input_file.readlines()
input_file.close()
base_url = "https://www.prothomalo.com"
print(len(urls))
for i in range(0, len(urls)):
url = urls[i]
print(url)
global browser
global soup
options = Options()
ua = UserAgent()
userAgent = ua.random
print(userAgent)
options.add_argument(f"user-agent={userAgent}")
options.add_argument("headless") # headless mode, suitable for CI/CD
browser = webdriver.Chrome(chrome_options=options)
browser.get(url)
    # scroll to the bottom once so lazy-loaded images receive their real src
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    sleep(2)
html = browser.page_source
browser.quit()
soup = BeautifulSoup(html, "lxml")
try:
get_story()
except:
print("something went wrong, skipping this one....")
sleep(randint(3, 6))
|
the-stack_0_18015 | import sys
import unittest
from unittest import mock
from django import __version__
from django.core.management import CommandError, call_command
from django.test import SimpleTestCase
from django.test.utils import captured_stdin, captured_stdout
class ShellCommandTestCase(SimpleTestCase):
script_globals = 'print("__name__" in globals())'
script_with_inline_function = (
"import django\ndef f():\n print(django.__version__)\nf()"
)
def test_command_option(self):
with self.assertLogs("test", "INFO") as cm:
call_command(
"shell",
command=(
"import django; from logging import getLogger; "
'getLogger("test").info(django.__version__)'
),
)
self.assertEqual(cm.records[0].getMessage(), __version__)
def test_command_option_globals(self):
with captured_stdout() as stdout:
call_command("shell", command=self.script_globals)
self.assertEqual(stdout.getvalue().strip(), "True")
def test_command_option_inline_function_call(self):
with captured_stdout() as stdout:
call_command("shell", command=self.script_with_inline_function)
self.assertEqual(stdout.getvalue().strip(), __version__)
@unittest.skipIf(
sys.platform == "win32", "Windows select() doesn't support file descriptors."
)
@mock.patch("django.core.management.commands.shell.select")
def test_stdin_read(self, select):
with captured_stdin() as stdin, captured_stdout() as stdout:
stdin.write("print(100)\n")
stdin.seek(0)
call_command("shell")
self.assertEqual(stdout.getvalue().strip(), "100")
@unittest.skipIf(
sys.platform == "win32",
"Windows select() doesn't support file descriptors.",
)
@mock.patch("django.core.management.commands.shell.select") # [1]
def test_stdin_read_globals(self, select):
with captured_stdin() as stdin, captured_stdout() as stdout:
stdin.write(self.script_globals)
stdin.seek(0)
call_command("shell")
self.assertEqual(stdout.getvalue().strip(), "True")
@unittest.skipIf(
sys.platform == "win32",
"Windows select() doesn't support file descriptors.",
)
@mock.patch("django.core.management.commands.shell.select") # [1]
def test_stdin_read_inline_function_call(self, select):
with captured_stdin() as stdin, captured_stdout() as stdout:
stdin.write(self.script_with_inline_function)
stdin.seek(0)
call_command("shell")
self.assertEqual(stdout.getvalue().strip(), __version__)
@mock.patch("django.core.management.commands.shell.select.select") # [1]
@mock.patch.dict("sys.modules", {"IPython": None})
def test_shell_with_ipython_not_installed(self, select):
select.return_value = ([], [], [])
with self.assertRaisesMessage(
CommandError, "Couldn't import ipython interface."
):
call_command("shell", interface="ipython")
@mock.patch("django.core.management.commands.shell.select.select") # [1]
@mock.patch.dict("sys.modules", {"bpython": None})
def test_shell_with_bpython_not_installed(self, select):
select.return_value = ([], [], [])
with self.assertRaisesMessage(
CommandError, "Couldn't import bpython interface."
):
call_command("shell", interface="bpython")
# [1] Patch select to prevent tests failing when the test suite is run
# in parallel mode. The tests are run in a subprocess and the subprocess's
# stdin is closed and replaced by /dev/null. Reading from /dev/null always
# returns EOF and so select always shows that sys.stdin is ready to read.
# This causes problems because of the call to select.select() toward the
# end of shell's handle() method.
|
the-stack_0_18016 | import os
from flask import Flask, request, flash, redirect, send_file
from werkzeug.utils import secure_filename
from models.model0 import Classifier
from img_func import save_bbox
app = Flask(__name__)
# Set up the directory where uploaded files are stored
UPLOAD_FOLDER = './images'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = os.urandom(16)  # flash() needs a session secret; a random key is assumed here
# Only image uploads are accepted; this extension set is an assumption
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'bmp'}

def file_is_allowed(filename):
    # Accept only filenames carrying one of the allowed image extensions
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# Our trained model
model = Classifier()
@app.route('/', methods=['POST'])
def upload_file():
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and file_is_allowed(file.filename):
filename = secure_filename(file.filename)
filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filename)
results = model.predict(filename)
save_bbox(filename, results)
# os.remove(filename)
return send_file(filename, as_attachment=True)
# check the post api : curl -v -X POST -H "Content-Type: multipart/form-data" -F "file=@<file location>" http://localhost:5000 -o <output file>
# replace <file location> with the image name
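
# Hedged sketch of the same check from Python instead of curl; the sample file name,
# the output name and the localhost:5000 address below are assumptions, and the
# `requests` package must be installed for it to run.
def _example_post_image(path="sample.jpg", url="http://localhost:5000/"):
    import requests  # only needed for this example
    with open(path, "rb") as f:
        resp = requests.post(url, files={"file": f})
    with open("out.jpg", "wb") as out:
        out.write(resp.content)
    return resp.status_code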
if __name__ == '__main__':
app.run(debug=True) |
the-stack_0_18017 | from es_common.enums.es_enum import ESEnum
class RobotName(ESEnum):
NAO = 0
PEPPER = 1
class SpeechActsType(ESEnum):
UNDEFINED = -1
FORMAL = 0
INFORMAL = 1
class VoiceTag(ESEnum):
SPEED = "rspd"
PITCH = "vct"
PROSODY = "bound"
STYLE = "style"
VOLUME = "vol"
PAUSE = "pau"
RESET = "rst"
class RobotLanguage(ESEnum):
ENGLISH = "en"
FRENCH = "fr"
DUTCH = "nl"
|
the-stack_0_18018 | import random
import taichi as ti
ti.init(arch=ti.cpu)
n = 8
x = ti.field(dtype=ti.f32, shape=n, needs_grad=True)
y = ti.field(dtype=ti.f32, shape=n)
L = ti.field(dtype=ti.f32, shape=(), needs_grad=True)
@ti.kernel
def reduce():
for i in range(n):
L[None] += 0.5 * (x[i] - y[i])**2
@ti.kernel
def gradient_descent():
for i in x:
x[i] -= x.grad[i] * 0.1
def main():
# Initialize vectors
for i in range(n):
x[i] = random.random()
y[i] = random.random()
# Optimize with 100 gradient descent iterations
for k in range(100):
with ti.Tape(loss=L):
reduce()
print('Loss =', L[None])
gradient_descent()
for i in range(n):
# Now you should approximately have x[i] == y[i]
print(x[i], y[i])
if __name__ == '__main__':
main()
|
the-stack_0_18019 | """
Provide tests for command line interface's node get configurations command.
"""
import json
import re
import pytest
from click.testing import CliRunner
from cli.constants import (
ADDRESS_REGEXP,
DEV_CONSENSUS_GENESIS_NODE_IP_ADDRESS_FOR_TESTING,
FAILED_EXIT_FROM_COMMAND_CODE,
PASSED_EXIT_FROM_COMMAND_CODE,
PUBLIC_KEY_REGEXP,
)
from cli.entrypoint import cli
from cli.utils import dict_to_pretty_json
def test_get_node_configs():
"""
Case: get node configurations.
Expect: node public key and address are returned.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'node',
'get-configs',
'--node-url',
DEV_CONSENSUS_GENESIS_NODE_IP_ADDRESS_FOR_TESTING,
])
node_configurations = json.loads(result.output).get('result').get('configurations')
node_address = node_configurations.get('node_address')
node_public_key = node_configurations.get('node_public_key')
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert re.match(pattern=ADDRESS_REGEXP, string=node_address) is not None
assert re.match(pattern=PUBLIC_KEY_REGEXP, string=node_public_key) is not None
def test_get_node_configs_without_node_url(mocker, node_configurations):
"""
Case: get node configurations without passing node URL.
    Expect: node configurations are returned from a node on localhost.
"""
mock_get_node_configs = mocker.patch('cli.node.service.loop.run_until_complete')
mock_get_node_configs.return_value = node_configurations
runner = CliRunner()
result = runner.invoke(cli, [
'node',
'get-configs',
])
expected_node_configurations = {
'result': {
'configurations': node_configurations.data,
},
}
assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert expected_node_configurations == json.loads(result.output)
def test_get_node_configs_invalid_node_url():
"""
Case: get node configurations by passing an invalid node URL.
Expect: the following node URL is invalid error message.
"""
    invalid_node_url = 'domainwithoutextension'
runner = CliRunner()
result = runner.invoke(cli, [
'node',
'get-configs',
'--node-url',
invalid_node_url,
])
expected_error = {
'errors': {
'node_url': [
f'The following node URL `{invalid_node_url}` is invalid.',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
@pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com'])
def test_get_node_configs_node_url_with_protocol(node_url_with_protocol):
"""
Case: get node configurations by passing node URL with an explicit protocol.
Expect: the following node URL contains a protocol error message.
"""
runner = CliRunner()
result = runner.invoke(cli, [
'node',
'get-configs',
'--node-url',
node_url_with_protocol,
])
expected_error = {
'errors': {
'node_url': [
f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).',
],
},
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
def test_get_node_configs_non_existing_node_url():
"""
Case: get node configurations by passing the non-existing node URL.
Expect: check if node running at the URL error message.
"""
non_existing_node_url = 'non-existing-node.com'
runner = CliRunner()
result = runner.invoke(cli, [
'node',
'get-configs',
'--node-url',
non_existing_node_url,
])
expected_error = {
'errors': f'Please check if your node running at http://{non_existing_node_url}:8080.',
}
assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code
assert dict_to_pretty_json(expected_error) in result.output
|
the-stack_0_18021 | import asyncio
from aredis import StrictRedis # type: ignore
from jaeger_client import Config, Tracer # type: ignore
from opentracing.scope_managers.asyncio import AsyncioScopeManager # type: ignore
from rap.server import Server, UserChannel
from rap.server.plugin.processor import TracingProcessor
async def echo_body(channel: UserChannel) -> None:
cnt: int = 0
async for body in channel.iter_body():
await asyncio.sleep(1)
cnt += 1
print(cnt, body)
if cnt > 2:
break
await channel.write(f"pong! {cnt}")
async def async_sum(a: int, b: int) -> int:
await asyncio.sleep(0.01) # mock io time
return a + b
if __name__ == "__main__":
import logging
logging.basicConfig(
format="[%(asctime)s %(levelname)s] %(message)s", datefmt="%y-%m-%d %H:%M:%S", level=logging.DEBUG
)
loop = asyncio.new_event_loop()
redis: StrictRedis = StrictRedis.from_url("redis://localhost")
rpc_server: Server = Server("example")
opentracing_config: Config = Config(
config={
"sampler": {"type": "const", "param": 1},
"logging": True,
"local_agent": {"reporting_host": "127.0.0.1"},
},
scope_manager=AsyncioScopeManager(),
service_name="rap server opentracing example",
)
jaeger_tracer: Tracer = opentracing_config.initialize_tracer()
rpc_server.load_processor([TracingProcessor(jaeger_tracer)])
rpc_server.register(async_sum)
rpc_server.register(echo_body)
loop.run_until_complete(rpc_server.run_forever())
jaeger_tracer.close()
|
the-stack_0_18023 | #
print('Arithmetic Progression Generator')
print('-=' * 10)
primeiro = int(input('First term: '))
razao = int(input('Common difference of the progression: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print(f'{termo} -', end='')
termo += razao
cont += 1
    print('Pause')
    mais = int(input('How many more terms do you want to show? '))
print(f'Progression finished with {total} terms shown. \nEND')
|
the-stack_0_18024 | #!/usr/bin/env python3
# pylint: disable=invalid-name, missing-docstring, too-few-public-methods
#
# Copyright (c) 2019-Present VMware, Inc. or its affiliates.
#
import os
from gppylib.test.unit.gp_unittest import GpTestCase, run_tests
from gppylib.commands import gp
from gppylib.db import dbconn
class Context(object):
filename = os.path.join(gp.get_coordinatordatadir(), 'gpexpand.status')
dbname = os.getenv('PGDATABASE', 'postgres')
dburl = dbconn.DbURL(dbname=dbname)
conn = dbconn.connect(dburl)
day = 0
ctx = Context()
def get_gpexpand_status():
st = gp.get_gpexpand_status()
st.dbname = ctx.dbname
return st.get_status()
def insert_status(status):
ctx.day += 1
dbconn.execSQL(ctx.conn, '''
INSERT INTO gpexpand.status VALUES
( '{status}', date '2001-01-01' + interval '{day} day');
'''.format(status=status, day=ctx.day))
ctx.conn.commit()
def leave_phase1(func):
def wrapper(*args, **kwargs):
try:
os.unlink(ctx.filename)
except OSError:
pass
return func(*args, **kwargs)
return wrapper
def leave_phase2(func):
def wrapper(*args, **kwargs):
dbconn.execSQL(ctx.conn, '''
DROP SCHEMA IF EXISTS gpexpand CASCADE;
''')
ctx.conn.commit()
return func(*args, **kwargs)
return wrapper
def drop_table(name):
def decorator(func):
def wrapper(*args, **kwargs):
dbconn.execSQL(ctx.conn, '''
DROP TABLE IF EXISTS {name};
'''.format(name=name))
ctx.conn.commit()
return func(*args, **kwargs)
return wrapper
return decorator
def start_redistribution(func):
def wrapper(*args, **kwargs):
insert_status('EXPANSION STARTED')
return func(*args, **kwargs)
return wrapper
def stop_redistribution(func):
def wrapper(*args, **kwargs):
insert_status('EXPANSION STOPPED')
return func(*args, **kwargs)
return wrapper
def expanding_table(name):
def decorator(func):
def wrapper(*args, **kwargs):
dbconn.execSQL(ctx.conn, '''
UPDATE gpexpand.status_detail SET STATUS='IN PROGRESS'
WHERE fq_name='{name}';
'''.format(name=name))
ctx.conn.commit()
return func(*args, **kwargs)
return wrapper
return decorator
def expanded_table(name):
def decorator(func):
def wrapper(*args, **kwargs):
dbconn.execSQL(ctx.conn, '''
UPDATE gpexpand.status_detail SET STATUS='COMPLETED'
WHERE fq_name='{name}';
'''.format(name=name))
ctx.conn.commit()
return func(*args, **kwargs)
return wrapper
return decorator
class GpExpandUtils(GpTestCase):
def setUp(self):
ctx.day = 1
dbconn.execSQL(ctx.conn, '''
DROP SCHEMA IF EXISTS gpexpand CASCADE;
CREATE SCHEMA gpexpand;
CREATE TABLE gpexpand.status (status text, updated timestamp);
CREATE TABLE gpexpand.status_detail (
dbname text,
fq_name text,
schema_oid oid,
table_oid oid,
distribution_policy smallint[],
distribution_policy_names text,
distribution_policy_coloids text,
distribution_policy_type text,
root_partition_oid oid,
storage_options text,
rank int,
status text,
expansion_started timestamp,
expansion_finished timestamp,
source_bytes numeric
);
INSERT INTO gpexpand.status VALUES
( 'SETUP', '2001-01-01' ),
( 'SETUP DONE', '2001-01-02' );
INSERT INTO gpexpand.status_detail (dbname, fq_name, rank, status) VALUES
('fake_db', 'public.t1', 2, 'NOT STARTED'),
('fake_db', 'public.t2', 2, 'NOT STARTED');
'''.format(dbname=ctx.dbname))
ctx.conn.commit()
with open(ctx.filename, 'w') as f:
f.write('''UNINITIALIZED:None
EXPANSION_PREPARE_STARTED:<filename>
BUILD_SEGMENT_TEMPLATE_STARTED:<filename>
BUILD_SEGMENT_TEMPLATE_DONE:None
BUILD_SEGMENTS_STARTED:<filename>
BUILD_SEGMENTS_DONE:<number>
UPDATE_CATALOG_STARTED:<filename>
UPDATE_CATALOG_DONE:None
SETUP_EXPANSION_SCHEMA_STARTED:None
SETUP_EXPANSION_SCHEMA_DONE:None
PREPARE_EXPANSION_SCHEMA_STARTED:None
PREPARE_EXPANSION_SCHEMA_DONE:None
EXPANSION_PREPARE_DONE:None
''')
@leave_phase1
@leave_phase2
def tearDown(self):
pass
@leave_phase1
@leave_phase2
def test_when_no_expansion(self):
st = get_gpexpand_status()
self.assertEqual(st.phase, 0)
self.assertEqual(st.status, 'NO EXPANSION DETECTED')
self.assertEqual(len(st.uncompleted), 0)
self.assertEqual(len(st.inprogress), 0)
self.assertEqual(len(st.completed), 0)
st.get_progress()
self.assertEqual(st.phase, 0)
self.assertEqual(st.status, 'NO EXPANSION DETECTED')
self.assertEqual(len(st.uncompleted), 0)
self.assertEqual(len(st.inprogress), 0)
self.assertEqual(len(st.completed), 0)
def test_phase1_with_empty_status(self):
with open(ctx.filename, 'w'):
pass
st = get_gpexpand_status()
self.assertEqual(st.phase, 1)
self.assertEqual(st.status, 'UNKNOWN PHASE1 STATUS')
def test_phase1_with_normal_status(self):
st = get_gpexpand_status()
self.assertEqual(st.phase, 1)
self.assertEqual(st.status, 'EXPANSION_PREPARE_DONE')
@leave_phase1
@drop_table('gpexpand.status_detail')
def test_phase2_when_missing_status_detail(self):
st = get_gpexpand_status()
st.get_progress()
self.assertEqual(st.phase, 2)
self.assertEqual(st.status, 'SETUP DONE')
self.assertEqual(len(st.uncompleted), 0)
self.assertEqual(len(st.inprogress), 0)
self.assertEqual(len(st.completed), 0)
@leave_phase1
def test_phase2_when_setup_done(self):
st = get_gpexpand_status()
st.get_progress()
self.assertEqual(st.phase, 2)
self.assertEqual(st.status, 'SETUP DONE')
self.assertEqual(len(st.uncompleted), 2)
self.assertEqual(len(st.inprogress), 0)
self.assertEqual(len(st.completed), 0)
@leave_phase1
@start_redistribution
@expanding_table('public.t1')
def test_phase2_when_expanding_first_table(self):
st = get_gpexpand_status()
st.get_progress()
self.assertEqual(st.phase, 2)
self.assertEqual(st.status, 'EXPANSION STARTED')
self.assertEqual(len(st.uncompleted), 1)
self.assertEqual(len(st.inprogress), 1)
self.assertEqual(len(st.completed), 0)
@leave_phase1
@start_redistribution
@expanded_table('public.t1')
def test_phase2_when_expanded_first_table(self):
st = get_gpexpand_status()
st.get_progress()
self.assertEqual(st.phase, 2)
self.assertEqual(st.status, 'EXPANSION STARTED')
self.assertEqual(len(st.uncompleted), 1)
self.assertEqual(len(st.inprogress), 0)
self.assertEqual(len(st.completed), 1)
@leave_phase1
@start_redistribution
@expanded_table('public.t1')
@expanding_table('public.t2')
def test_phase2_when_expanding_last_table(self):
st = get_gpexpand_status()
st.get_progress()
self.assertEqual(st.phase, 2)
self.assertEqual(st.status, 'EXPANSION STARTED')
self.assertEqual(len(st.uncompleted), 0)
self.assertEqual(len(st.inprogress), 1)
self.assertEqual(len(st.completed), 1)
@leave_phase1
@start_redistribution
@expanded_table('public.t1')
@expanded_table('public.t2')
@stop_redistribution
def test_phase2_when_expanded_last_table(self):
st = get_gpexpand_status()
st.get_progress()
self.assertEqual(st.phase, 2)
self.assertEqual(st.status, 'EXPANSION STOPPED')
self.assertEqual(len(st.uncompleted), 0)
self.assertEqual(len(st.inprogress), 0)
self.assertEqual(len(st.completed), 2)
if __name__ == '__main__':
run_tests()
|
the-stack_0_18027 | #!/usr/bin/env python
# common/general.py
#
# Copyright (C) 2017-2020 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""General-purpose functions not tied to a particular project."""
from subprocess import PIPE, Popen
from typing import Any, Iterable, Optional, Tuple
def get_shell_type() -> Optional[str]:
"""
    Determine the type of interactive shell currently in use, if any.
Returns:
Optional[str]: Type of shell in use, or None if not in a shell
"""
try:
# noinspection Mypy
shell = str(get_ipython().__class__.__name__)
if shell == "ZMQInteractiveShell":
# IPython in Jupyter Notebook
return shell
if shell == "InteractiveShellEmbed":
# IPython in Jupyter Notebook using IPython.embed
return shell
if shell == "TerminalInteractiveShell":
# IPython in terminal
return shell
# Other
return shell
except NameError:
# Not in IPython
return None
def input_prefill(prompt: str, prefill: str) -> str:
"""
Prompts user for input with pre-filled text.
Does not handle colored prompt correctly
TODO: Does this block CTRL-D?
Arguments:
prompt (str): Prompt to present to user
prefill (str): Text to prefill for user
Returns:
str: Text inputted by user
"""
from readline import insert_text, redisplay, set_pre_input_hook
def pre_input_hook() -> None:
insert_text(prefill)
redisplay()
set_pre_input_hook(pre_input_hook)
result = input(prompt)
set_pre_input_hook()
return result
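
# Example (illustrative sketch only): present "mydata.txt" as an editable default
# at the prompt; the prompt text and default value are arbitrary.
#   filename = input_prefill("Save as: ", "mydata.txt")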
def run_command(
command: str,
timeout: int = 600,
acceptable_exitcodes: Optional[Iterable[int]] = None,
) -> Tuple[int, Optional[str], Optional[str]]:
"""
Arguments:
command: command to run
timeout: maximum time to await command's completion
acceptable_exitcodes: acceptable exit codes
Returns:
exitcode, standard output, and standard error
Raises:
ValueError: If exitcode is not in acceptable_exitcodes
"""
if acceptable_exitcodes is None:
acceptable_exitcodes = [0]
with Popen(command, shell=True, stdout=PIPE, stderr=PIPE) as child:
exitcode = child.wait(timeout)
stdout, stderr = child.communicate()
try:
stdout = stdout.decode("utf8")
except UnicodeDecodeError:
stdout = stdout.decode("ISO-8859-1")
try:
stderr = stderr.decode("utf8")
except UnicodeDecodeError:
stderr = stderr.decode("ISO-8859-1")
if exitcode not in acceptable_exitcodes:
raise ValueError(
f"subprocess failed with exit code {exitcode};\n\n"
f"STDOUT:\n"
f"{stdout}\n\n"
f"STDERR:\n"
f"{stderr}"
)
return (exitcode, stdout, stderr)
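
# A minimal usage sketch (not part of the original module); "echo hello" is an
# arbitrary command chosen only to illustrate the returned tuple.
if __name__ == "__main__":
    exitcode, stdout, stderr = run_command("echo hello")
    print(exitcode, stdout.strip(), stderr)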
|
the-stack_0_18030 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Bosch Rexroth AG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import getopt
import time
from datetime import datetime
from typing import List
import flatbuffers
import datalayer
from datalayer.variant import Result, Variant
from comm.datalayer import SubscriptionProperties
import debugging
def main():
# This is the Data Layer client connection string for TCP in the format: tcp://USER:PASSWORD@IP_ADDRESS:2069
    # Please check and change according to your environment:
# - USER: Enter your user name here - default is boschrexroth
# - PASSWORD: Enter your password here - default is boschrexroth
# - IP_ADDRESS: 10.0.2.2 If you develop in a VM (QEMU, Virtual Box, ...) and you want to connect to a ctrlX CORE virtual with port forwarding
    #                    192.168.1.1     If you are using a virtual with TAP adapter or a ctrlX with this IP address
# aaa.bbb.ccc.ddd If you are using a ctrlX CORE with this IP address
debugging.breakpoint()
connectionClient = "tcp://boschrexroth:[email protected]:2069"
if 'SNAP' in os.environ:
connectionClient = "ipc://"
print()
print("========================================================================")
print("sdk-py-datalayer-client - A ctrlX Data Layer Client App in Python")
with datalayer.system.System("") as datalayer_system:
print("INFO Starting Data Layer system")
datalayer_system.start(False)
# Create Data Layer client connection
print("INFO Creating Data Layer Client connection to ctrlX with",
connectionClient)
with datalayer_system.factory().create_client(connectionClient) as datalayer_client:
# Check if client is connected
print("INFO Client connected:", datalayer_client.is_connected())
if datalayer_client.is_connected() is False:
return
# Define the subscription properties by using Flatbuffers class SubscriptionProperties
builder = flatbuffers.Builder(1024)
id = builder.CreateString("sdk-py-sub")
SubscriptionProperties.SubscriptionPropertiesStart(builder)
SubscriptionProperties.SubscriptionPropertiesAddId(builder, id)
SubscriptionProperties.SubscriptionPropertiesAddKeepaliveInterval(
builder, 10000)
SubscriptionProperties.SubscriptionPropertiesAddPublishInterval(
builder, 1000)
SubscriptionProperties.SubscriptionPropertiesAddErrorInterval(
builder, 10000)
properties = SubscriptionProperties.SubscriptionPropertiesEnd(
builder)
builder.Finish(properties)
sub_prop = Variant()
sub_prop.set_flatbuffers(builder.Output())
# Create subscription
print("INFO Creating subscription")
result, sub = datalayer_client.create_subscription_sync(
sub_prop, cb_subscription_sync)
if result is not Result.OK:
print("ERROR Creating subscription failed:", result)
# Add subscription node
print("INFO Add subscription node")
sub_adr = "framework/metrics/system/cpu-utilisation-percent"
result = sub.subscribe(sub_adr)
if result is not Result.OK:
print("ERROR Adding subscription node failed:", result)
while datalayer_client.is_connected():
dt_str = datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
addr = "framework/metrics/system/memused-percent"
result, read_var = datalayer_client.read_sync(addr)
val = read_var.get_float64()
print("INFO read_sync: %s, %s: %f" % (dt_str, addr, val))
'''
addr = "scheduler/admin/state"
dt_str = datetime.now().strftime("%d/%m/%Y %H:%M:%S.%f")
conv = datalayer_system.json_converter()
result, read_var = datalayer_client.read_json_sync(
conv, "scheduler/admin/state", 0)
state = read_var.get_string()
print("INFO read_json_sync: %s, %s: %s" %
(dt_str, addr, state))
if 'RUN' in state:
state = state.replace('RUN', 'CONFIG')
else:
state = state.replace('CONFIG', 'RUN')
print("New state: ", state)
# Result.OK expected
result, error = datalayer_client.write_json_sync(
conv, addr, state)
print("write_json_sync Result:", result)
# Result.INVALID_ADDRESS expected
result, error = datalayer_client.write_json_sync(
conv, addr+"x", state)
print("write_json_sync with invalid address Result:",
result, "Error:", error.get_string())
'''
time.sleep(5.0)
print("ERROR Data Layer connection")
print("INFO Close subscription")
sub.close()
print("INFO Stopping Data Layer system")
datalayer_system.stop(True)
# Response notify callback function
def cb_subscription_sync(result: Result, items: List[datalayer.subscription.NotifyItem], userdata):
if result is not Result.OK:
print("ERROR notify subscription:", result)
return
timestamp = items[0].get_timestamp()
# convert ldap to unix timestamp
dt = datetime.fromtimestamp(timestamp/10000000-11644473600)
dt_str = dt.strftime("%d/%m/%Y %H:%M:%S.%f")
address = items[0].get_address()
val = Variant.get_float64(items[0].get_data())
print("INFO Subscription notification: %s, %s: %f" %
(dt_str, address, val))
if __name__ == '__main__':
print('Number of arguments:', len(sys.argv), 'arguments.')
print('Argument List:', str(sys.argv))
debugging.init()
main()
|
the-stack_0_18032 | """
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| void (kind=V)
|
| str_ (string_, bytes_) (kind=S) [Python 2]
| unicode_ (kind=U) [Python 2]
|
| bytes_ (string_) (kind=S) [Python 3]
| str_ (unicode_) (kind=U) [Python 3]
|
\\-> object_ (not used much) (kind=O)
"""
from __future__ import division, absolute_import, print_function
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data','datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
from numpy.core.multiarray import (
typeinfo, ndarray, array, empty, dtype, datetime_data,
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
import types as _types
import sys
from numpy.compat import bytes, long
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
#import string
# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
# LOWER_TABLE)
# assert (string.maketrans(string.ascii_lowercase, string.ascii_uppercase) == \
# UPPER_TABLE)
#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_lower(s):
""" Apply English case rules to convert ASCII strings to all lower case.
This is an internal utility function to replace calls to str.lower() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
lowered : str
Examples
--------
>>> from numpy.core.numerictypes import english_lower
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
>>> english_lower('')
''
"""
lowered = s.translate(LOWER_TABLE)
return lowered
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s
sctypeDict = {} # Contains all leaf-node scalar types with aliases
sctypeNA = {}        # Contains all leaf-node types -> numarray type equivalences
allTypes = {} # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
def bitname(obj):
"""Return a bit-width name for a given type object"""
name = obj.__name__
base = ''
char = ''
try:
if name[-1] == '_':
newname = name[:-1]
else:
newname = name
info = typeinfo[english_upper(newname)]
assert(info[-1] == obj) # sanity check
bits = info[2]
except KeyError: # bit-width name
base, bits = _evalname(name)
char = base[0]
if name == 'bool_':
char = 'b'
base = 'bool'
elif name=='void':
char = 'V'
base = 'void'
elif name=='object_':
char = 'O'
base = 'object'
bits = 0
elif name=='datetime64':
char = 'M'
elif name=='timedelta64':
char = 'm'
if sys.version_info[0] >= 3:
if name=='bytes_':
char = 'S'
base = 'bytes'
elif name=='str_':
char = 'U'
base = 'str'
else:
if name=='string_':
char = 'S'
base = 'string'
elif name=='unicode_':
char = 'U'
base = 'unicode'
bytes = bits // 8
if char != '' and bytes != 0:
char = "%s%d" % (char, bytes)
return base, bits, char
def _add_types():
for a in typeinfo.keys():
name = english_lower(a)
if isinstance(typeinfo[a], tuple):
typeobj = typeinfo[a][-1]
# define C-name and insert typenum and typechar references also
allTypes[name] = typeobj
sctypeDict[name] = typeobj
sctypeDict[typeinfo[a][0]] = typeobj
sctypeDict[typeinfo[a][1]] = typeobj
else: # generic class
allTypes[name] = typeinfo[a]
_add_types()
def _add_aliases():
for a in typeinfo.keys():
name = english_lower(a)
if not isinstance(typeinfo[a], tuple):
continue
typeobj = typeinfo[a][-1]
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(typeobj)
if base[-3:] == 'int' or char[0] in 'ui': continue
if base != '':
myname = "%s%d" % (base, bit)
if (name != 'longdouble' and name != 'clongdouble') or \
myname not in allTypes.keys():
allTypes[myname] = typeobj
sctypeDict[myname] = typeobj
if base == 'complex':
na_name = '%s%d' % (english_capitalize(base), bit//2)
elif base == 'bool':
na_name = english_capitalize(base)
sctypeDict[na_name] = typeobj
else:
na_name = "%s%d" % (english_capitalize(base), bit)
sctypeDict[na_name] = typeobj
sctypeNA[na_name] = typeobj
sctypeDict[na_name] = typeobj
sctypeNA[typeobj] = na_name
sctypeNA[typeinfo[a][0]] = na_name
if char != '':
sctypeDict[char] = typeobj
sctypeNA[char] = na_name
_add_aliases()
# Integers handled so that
# The int32, int64 types should agree exactly with
# PyArray_INT32, PyArray_INT64 in C
# We need to enforce the same checking as is done
# in arrayobject.h where the order of getting a
# bit-width match is:
# long, longlong, int, short, char
# for int8, int16, int32, int64, int128
def _add_integer_aliases():
_ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
for ctype in _ctypes:
val = typeinfo[ctype]
bits = val[2]
charname = 'i%d' % (bits//8,)
ucharname = 'u%d' % (bits//8,)
intname = 'int%d' % bits
UIntname = 'UInt%d' % bits
Intname = 'Int%d' % bits
uval = typeinfo['U'+ctype]
typeobj = val[-1]
utypeobj = uval[-1]
if intname not in allTypes.keys():
uintname = 'uint%d' % bits
allTypes[intname] = typeobj
allTypes[uintname] = utypeobj
sctypeDict[intname] = typeobj
sctypeDict[uintname] = utypeobj
sctypeDict[Intname] = typeobj
sctypeDict[UIntname] = utypeobj
sctypeDict[charname] = typeobj
sctypeDict[ucharname] = utypeobj
sctypeNA[Intname] = typeobj
sctypeNA[UIntname] = utypeobj
sctypeNA[charname] = typeobj
sctypeNA[ucharname] = utypeobj
sctypeNA[typeobj] = Intname
sctypeNA[utypeobj] = UIntname
sctypeNA[val[0]] = Intname
sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# We use these later
void = allTypes['void']
generic = allTypes['generic']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
type_pairs = [('complex_', 'cdouble'),
('int0', 'intp'),
('uint0', 'uintp'),
('single', 'float'),
('csingle', 'cfloat'),
('singlecomplex', 'cfloat'),
('float_', 'double'),
('intc', 'int'),
('uintc', 'uint'),
('int_', 'long'),
('uint', 'ulong'),
('cfloat', 'cdouble'),
('longfloat', 'longdouble'),
('clongfloat', 'clongdouble'),
('longcomplex', 'clongdouble'),
('bool_', 'bool'),
('unicode_', 'unicode'),
('object_', 'object')]
if sys.version_info[0] >= 3:
type_pairs.extend([('bytes_', 'string'),
('str_', 'unicode'),
('string_', 'string')])
else:
type_pairs.extend([('str_', 'string'),
('string_', 'string'),
('bytes_', 'string')])
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
'complex', 'bool', 'string', 'datetime', 'timedelta']
if sys.version_info[0] >= 3:
# Py3K
to_remove.append('bytes')
to_remove.append('str')
to_remove.remove('unicode')
to_remove.remove('long')
for t in to_remove:
try:
del allTypes[t]
del sctypeDict[t]
except KeyError:
pass
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
for name in typeinfo.keys():
tup = typeinfo[name]
if isinstance(tup, tuple):
if tup[0] not in ['p','P']:
_sctype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool,object,str,unicode,void]}
def _add_array_type(typename, bits):
try:
t = allTypes['%s%d' % (typename, bits)]
except KeyError:
pass
else:
sctypes[typename].append(t)
def _set_array_types():
ibytes = [1, 2, 4, 8, 16, 32, 64]
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
for bytes in ibytes:
bits = 8*bytes
_add_array_type('int', bits)
_add_array_type('uint', bits)
for bytes in fbytes:
bits = 8*bytes
_add_array_type('float', bits)
_add_array_type('complex', 2*bits)
_gi = dtype('p')
if _gi.type not in sctypes['int']:
indx = 0
sz = _gi.itemsize
_lst = sctypes['int']
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
indx += 1
sctypes['int'].insert(indx, _gi.type)
sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
"""
Return the scalar type of highest precision of the same kind as the input.
Parameters
----------
t : dtype or dtype specifier
The input data type. This can be a `dtype` object or an object that
is convertible to a `dtype`.
Returns
-------
out : dtype
The highest precision data type of the same kind (`dtype.kind`) as `t`.
See Also
--------
obj2sctype, mintypecode, sctype2char
dtype
Examples
--------
>>> np.maximum_sctype(np.int)
<type 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
<type 'numpy.uint64'>
>>> np.maximum_sctype(np.complex)
<type 'numpy.complex192'>
>>> np.maximum_sctype(str)
<type 'numpy.string_'>
>>> np.maximum_sctype('i2')
<type 'numpy.int64'>
>>> np.maximum_sctype('f4')
<type 'numpy.float96'>
"""
g = obj2sctype(t)
if g is None:
return t
t = g
name = t.__name__
base, bits = _evalname(name)
if bits == 0:
return t
else:
return sctypes[base][-1]
try:
buffer_type = _types.BufferType
except AttributeError:
# Py3K
buffer_type = memoryview
_python_types = {int : 'int_',
float: 'float_',
complex: 'complex_',
bool: 'bool_',
bytes: 'bytes_',
unicode: 'unicode_',
buffer_type: 'void',
}
if sys.version_info[0] >= 3:
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, type):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
else:
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, _types.TypeType):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
"""
Determines whether the given object represents a scalar data-type.
Parameters
----------
rep : any
If `rep` is an instance of a scalar dtype, True is returned. If not,
False is returned.
Returns
-------
out : bool
Boolean result of check whether `rep` is a scalar dtype.
See Also
--------
issubsctype, issubdtype, obj2sctype, sctype2char
Examples
--------
>>> np.issctype(np.int32)
True
>>> np.issctype(list)
False
>>> np.issctype(1.1)
False
Strings are also a scalar type:
>>> np.issctype(np.dtype('str'))
True
"""
if not isinstance(rep, (type, dtype)):
return False
try:
res = obj2sctype(rep)
if res and res != object_:
return True
return False
except:
return False
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
Parameters
----------
rep : any
The object of which the type is returned.
default : any, optional
If given, this is returned for objects whose types can not be
determined. If not given, None is returned for those objects.
Returns
-------
dtype : dtype or Python type
The data type of `rep`.
See Also
--------
sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
Examples
--------
>>> np.obj2sctype(np.int32)
<type 'numpy.int32'>
>>> np.obj2sctype(np.array([1., 2.]))
<type 'numpy.float64'>
>>> np.obj2sctype(np.array([1.j]))
<type 'numpy.complex128'>
>>> np.obj2sctype(dict)
<type 'numpy.object_'>
>>> np.obj2sctype('string')
<type 'numpy.string_'>
>>> np.obj2sctype(1, default=list)
<type 'list'>
"""
try:
if issubclass(rep, generic):
return rep
except TypeError:
pass
if isinstance(rep, dtype):
return rep.type
if isinstance(rep, type):
return _python_type(rep)
if isinstance(rep, ndarray):
return rep.dtype.type
try:
res = dtype(rep)
except:
return default
return res.type
def issubclass_(arg1, arg2):
"""
Determine if a class is a subclass of a second class.
`issubclass_` is equivalent to the Python built-in ``issubclass``,
except that it returns False instead of raising a TypeError is one
of the arguments is not a class.
Parameters
----------
arg1 : class
Input class. True is returned if `arg1` is a subclass of `arg2`.
arg2 : class or tuple of classes.
Input class. If a tuple of classes, True is returned if `arg1` is a
subclass of any of the tuple elements.
Returns
-------
out : bool
Whether `arg1` is a subclass of `arg2` or not.
See Also
--------
issubsctype, issubdtype, issctype
Examples
--------
>>> np.issubclass_(np.int32, np.int)
True
>>> np.issubclass_(np.int32, np.float)
False
"""
try:
return issubclass(arg1, arg2)
except TypeError:
return False
def issubsctype(arg1, arg2):
"""
Determine if the first argument is a subclass of the second argument.
Parameters
----------
arg1, arg2 : dtype or dtype specifier
Data-types.
Returns
-------
out : bool
The result.
See Also
--------
issctype, issubdtype,obj2sctype
Examples
--------
>>> np.issubsctype('S8', str)
True
>>> np.issubsctype(np.array([1]), np.int)
True
>>> np.issubsctype(np.array([1]), np.float)
False
"""
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
def issubdtype(arg1, arg2):
"""
Returns True if first argument is a typecode lower/equal in type hierarchy.
Parameters
----------
arg1, arg2 : dtype_like
dtype or string representing a typecode.
Returns
-------
out : bool
See Also
--------
issubsctype, issubclass_
numpy.core.numerictypes : Overview of numpy type hierarchy.
Examples
--------
>>> np.issubdtype('S1', str)
True
>>> np.issubdtype(np.float64, np.float32)
False
"""
if issubclass_(arg2, generic):
return issubclass(dtype(arg1).type, arg2)
mro = dtype(arg2).type.mro()
if len(mro) > 1:
val = mro[1]
else:
val = mro[0]
return issubclass(dtype(arg1).type, val)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
"""
Base object for a dictionary for look-up with any alias for an array dtype.
Instances of `_typedict` can not be used as dictionaries directly,
first they have to be populated.
"""
def __getitem__(self, obj):
return dict.__getitem__(self, obj2sctype(obj))
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
for name, val in typeinfo.items():
if not isinstance(val, tuple):
continue
obj = val[-1]
nbytes[obj] = val[2] // 8
_alignment[obj] = val[3]
if (len(val) > 5):
_maxvals[obj] = val[4]
_minvals[obj] = val[5]
else:
_maxvals[obj] = None
_minvals[obj] = None
_construct_lookups()
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
Parameters
----------
sctype : scalar dtype or object
If a scalar dtype, the corresponding string character is
returned. If an object, `sctype2char` tries to infer its scalar type
and then return the corresponding string character.
Returns
-------
typechar : str
The string character corresponding to the scalar type.
Raises
------
ValueError
If `sctype` is an object for which the type can not be inferred.
See Also
--------
obj2sctype, issctype, issubsctype, mintypecode
Examples
--------
>>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
... print np.sctype2char(sctype)
l
d
D
S
O
>>> x = np.array([1., 2-1.j])
>>> np.sctype2char(x)
'D'
>>> np.sctype2char(list)
'O'
"""
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError("unrecognized type")
return _sctype2char_dict[sctype]
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
try:
ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
_types.LongType, _types.BooleanType,
_types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
# Py3K
ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
if issubclass(key, allTypes['flexible']):
_typestr[key] = _sctype2char_dict[key]
else:
_typestr[key] = empty((1,),key).dtype.str[1:]
# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
if val not in sctypeDict:
sctypeDict[val] = key
# Add additional strings to the sctypeDict
if sys.version_info[0] >= 3:
_toadd = ['int', 'float', 'complex', 'bool', 'object',
'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
else:
_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
('str', allTypes['string_']),
'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = name[1]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character':'c',
'Integer':'bhilqp',
'UnsignedInteger':'BHILQP',
'Float':'efdg',
'Complex':'FDG',
'AllInteger':'bBhHiIlLqQpP',
'AllFloat':'efdgFDG',
'Datetime': 'Mm',
'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type both can be coerced to
# is found. Otherwise, return None
def _find_common_coerce(a, b):
if a > b:
return a
try:
thisind = __test_types.index(a.char)
except ValueError:
return None
return _can_coerce_all([a,b], start=thisind)
# Find a data-type that all data-types in a list can be coerced to
def _can_coerce_all(dtypelist, start=0):
N = len(dtypelist)
if N == 0:
return None
if N == 1:
return dtypelist[0]
thisind = start
while thisind < __len_test_types:
newdtype = dtype(__test_types[thisind])
numcoerce = len([x for x in dtypelist if newdtype >= x])
if numcoerce == N:
return newdtype
thisind += 1
return None
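# Illustrative sketch (not part of the original module): _can_coerce_all walks
# the codes in __test_types and returns the first dtype to which every entry
# in the list can be safely cast, e.g.
#
#     >>> _can_coerce_all([dtype('int8'), dtype('uint8')])
#     dtype('int16')
#     >>> _can_coerce_all([dtype('int64'), dtype('float32')])
#     dtype('float64')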
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
Parameters
----------
array_types : sequence
A list of dtypes or dtype convertible objects representing arrays.
scalar_types : sequence
A list of dtypes or dtype convertible objects representing scalars.
Returns
-------
datatype : dtype
The common data type, which is the maximum of `array_types` ignoring
`scalar_types`, unless the maximum of `scalar_types` is of a
different kind (`dtype.kind`). If the kind is not understood, then
None is returned.
See Also
--------
dtype, common_type, can_cast, mintypecode
Examples
--------
>>> np.find_common_type([], [np.int64, np.float32, np.complex])
dtype('complex128')
>>> np.find_common_type([np.int64, np.float32], [])
dtype('float64')
The standard casting rules ensure that a scalar cannot up-cast an
array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) than
the array:
>>> np.find_common_type([np.float32], [np.int64, np.float64])
dtype('float32')
Complex is of a different type, so it up-casts the float in the
`array_types` argument:
>>> np.find_common_type([np.float32], [np.complex])
dtype('complex128')
Type specifier strings are convertible to dtypes and can therefore
be used instead of dtypes:
>>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
dtype('complex128')
"""
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
maxa = _can_coerce_all(array_types)
maxsc = _can_coerce_all(scalar_types)
if maxa is None:
return maxsc
if maxsc is None:
return maxa
try:
index_a = _kind_list.index(maxa.kind)
index_sc = _kind_list.index(maxsc.kind)
except ValueError:
return None
if index_sc > index_a:
return _find_common_coerce(maxsc,maxa)
else:
return maxa
|
the-stack_0_18034 | # -*- coding: utf-8 -*-
# Copyright (c) 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Execute profiling."""
import os
from typing import Any, Dict
from neural_compressor.ux.components.db_manager.db_operations import (
ProfilingAPIInterface,
ProjectAPIInterface,
)
from neural_compressor.ux.components.profiling.profiling import Profiling
from neural_compressor.ux.utils.consts import ExecutionStatus
from neural_compressor.ux.utils.exceptions import ClientErrorException, InternalException
from neural_compressor.ux.utils.executor import Executor
from neural_compressor.ux.utils.parser import ProfilingParser
from neural_compressor.ux.utils.templates.workdir import Workdir
from neural_compressor.ux.web.communication import MessageQueue
mq = MessageQueue()
def execute_profiling(data: Dict[str, Any]) -> None:
"""
Execute profiling.
Expected data:
{
"request_id": "asd",
"profiling_id": "1"
}
"""
if not all([str(data.get("request_id", "")), str(data.get("profiling_id", ""))]):
message = "Missing request id or profiling id."
mq.post_error(
"profiling_finish",
{"message": "Failure", "code": 404},
)
raise ClientErrorException(message)
request_id: str = str(data["request_id"])
profiling_id: int = int(data["profiling_id"])
try:
profiling_details = ProfilingAPIInterface.get_profiling_details({"id": profiling_id})
project_id = profiling_details["project_id"]
project_details = ProjectAPIInterface.get_project_details({"id": project_id})
ProfilingAPIInterface.update_profiling_status(
{
"id": profiling_id,
"status": ExecutionStatus.WIP,
},
)
response_data = execute_real_profiling(
request_id=request_id,
project_details=project_details,
profiling_details=profiling_details,
)
mq.post_success("profiling_finish", response_data)
except Exception:
ProfilingAPIInterface.update_profiling_status(
{
"id": profiling_id,
"status": ExecutionStatus.ERROR,
},
)
mq.post_error(
"profiling_finish",
{"message": "Failure", "code": 404, "request_id": request_id},
)
raise
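# Illustrative sketch (not part of this module): the entry point above is
# driven with the payload shape from its docstring; both ids are placeholder
# values that are resolved through the database layer, not a real project.
#
#     execute_profiling({"request_id": "asd", "profiling_id": "1"})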
def execute_real_profiling(
request_id: str,
project_details: dict,
profiling_details: dict,
) -> dict:
"""Execute profiling."""
profiling: Profiling = Profiling(project_details, profiling_details)
Workdir.clean_logs(profiling.workdir)
profiling.generate_config()
logs = [os.path.join(profiling.workdir, "output.txt")]
ProfilingAPIInterface.update_log_path(
{
"id": profiling.profiling_id,
"log_path": logs[0],
},
)
ProfilingAPIInterface.update_execution_command(
{
"id": profiling.profiling_id,
"execution_command": profiling.command,
},
)
send_data = {
"message": "started",
"request_id": request_id,
"output_path": logs[0],
}
executor = Executor(
workspace_path=profiling.workdir,
subject="profiling",
data=send_data,
log_name="output",
)
proc = executor.call(
profiling.command,
)
parser = ProfilingParser(logs)
parsed_data = parser.process()
if not proc.is_ok:
raise InternalException("Profiling failed during execution.")
ProfilingAPIInterface.bulk_add_results(
profiling_id=profiling.profiling_id,
results=parsed_data.get("profiling_data", []),
)
ProfilingAPIInterface.update_profiling_duration(
{
"id": profiling.profiling_id,
"duration": executor.process_duration,
},
)
ProfilingAPIInterface.update_profiling_status(
{
"id": profiling.profiling_id,
"status": ExecutionStatus.SUCCESS,
},
)
profiling_data = ProfilingAPIInterface.get_profiling_details(
{
"id": profiling.profiling_id,
},
)
response_data = {
"request_id": request_id,
}
response_data.update(profiling_data)
return response_data
|
the-stack_0_18036 | #! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
# NPS
# pulls images from video device and places them in a Queue or starts an inference for them on a network processor
import cv2
import queue
import threading
import time
from ssd_mobilenet_processor import SsdMobileNetProcessor
from queue import Queue
class VideoProcessor:
"""Class that pulls frames from a video file and either starts an inference with them or
puts them on a queue depending on how the instance is constructed.
"""
def __init__(self, video_file:str, request_video_width:int=640, request_video_height:int = 480,
network_processor:SsdMobileNetProcessor=None, output_queue:Queue=None, queue_put_wait_max:float = 0.01,
queue_full_sleep_seconds:float = 0.1):
"""Initializer for the class.
:param video_file: file name of the file from which to read video frames
:param request_video_width: the width in pixels to request from the video device, may be ignored.
:param request_video_height: the height in pixels to request from the video device, may be ignored.
:param network_processor: neural network processor on which we will start inferences for each frame.
If a value is passed
for this parameter then the output_queue, queue_put_wait_max, and queue_full_sleep_seconds will be ignored
and should be None
:param output_queue: A queue on which the video frames will be placed if the network_processor is None
:param queue_put_wait_max: The max number of seconds to wait when putting on output queue
:param queue_full_sleep_seconds: The number of seconds to sleep when the output queue is full.
"""
self._queue_full_sleep_seconds = queue_full_sleep_seconds
self._queue_put_wait_max = queue_put_wait_max
self._video_file = video_file
self._request_video_width = request_video_width
self._request_video_height = request_video_height
self._pause_mode = False
# create the video device
self._video_device = cv2.VideoCapture(self._video_file)
if ((self._video_device == None) or (not self._video_device.isOpened())):
print('\n\n')
print('Error - could not open video device.')
print('If you installed python opencv via pip or pip3 you')
print('need to uninstall it and install from source with -D WITH_V4L=ON')
print('Use the provided script: install-opencv-from_source.sh')
print('\n\n')
return
# Request the dimensions
self._video_device.set(cv2.CAP_PROP_FRAME_WIDTH, self._request_video_width)
self._video_device.set(cv2.CAP_PROP_FRAME_HEIGHT, self._request_video_height)
# save the actual dimensions
self._actual_video_width = self._video_device.get(cv2.CAP_PROP_FRAME_WIDTH)
self._actual_video_height = self._video_device.get(cv2.CAP_PROP_FRAME_HEIGHT)
print('actual video resolution: ' + str(self._actual_video_width) + ' x ' + str(self._actual_video_height))
self._output_queue = output_queue
self._network_processor = network_processor
self._use_output_queue = False
if (not(self._output_queue is None)):
self._use_output_queue = True
self._worker_thread = None #threading.Thread(target=self._do_work, args=())
def get_actual_video_width(self):
""" get the width of the images that will be placed on queue or sent to neural network processor.
:return: the width of each frame retrieved from the video device
"""
return self._actual_video_width
def get_actual_video_height(self):
"""get the height of the images that will be put in the queue or sent to the neural network processor
:return: The height of each frame retrieved from the video device
"""
return self._actual_video_height
def start_processing(self):
"""Starts the asynchronous thread reading from the video file and placing images in the output queue or sending to the
neural network processor
:return: None
"""
self._end_flag = False
if (self._use_output_queue):
if (self._worker_thread == None):
self._worker_thread = threading.Thread(target=self._do_work_queue, args=())
else:
if (self._worker_thread == None):
self._worker_thread = threading.Thread(target=self._do_work_network_processor, args=())
self._worker_thread.start()
def stop_processing(self):
"""stops the asynchronous thread from reading any new frames from the video device
:return:
"""
if (self._end_flag == True):
# Already stopped
return
self._end_flag = True
def pause(self):
"""pauses the aysnchronous processing so that it will not read any new frames until unpause is called.
:return: None
"""
self._pause_mode = True
def unpause(self):
""" Unpauses the asynchronous processing that was previously paused by calling pause
:return: None
"""
self._pause_mode = False
# Thread target. When call start_processing and initialized with an output queue,
# this function will be called in its own thread. it will keep working until stop_processing is called.
# or an error is encountered.
def _do_work_queue(self):
"""Thread target. When call start_processing and initialized with an output queue,
this function will be called in its own thread. it will keep working until stop_processing is called.
or an error is encountered. If the neural network processor was passed to the initializer rather than
a queue then this function will not be called.
:return: None
"""
print('in video_processor worker thread')
if (self._video_device == None):
print('video_processor _video_device is None, returning.')
return
while (not self._end_flag):
try:
while (self._pause_mode):
time.sleep(0.1)
ret_val, input_image = self._video_device.read()
if (not ret_val):
print("No image from video device, exiting")
break
self._output_queue.put(input_image, True, self._queue_put_wait_max)
except queue.Full:
# the video device is probably way faster than the processing
# so if our output queue is full sleep a little while before
# trying the next image from the video.
time.sleep(self._queue_full_sleep_seconds)
print('exiting video_processor worker thread for queue')
def _do_work_network_processor(self):
"""Thread target. when call start_processing and initialized with an neural network processor,
this function will be called in its own thread. it will keep working until stop_processing is called.
or an error is encountered. If the initializer was called with a queue rather than a neural network
processor then this will not be called.
:return: None
"""
print('in video_processor worker thread')
if (self._video_device == None):
print('video_processor _video_device is None, returning.')
return
while (not self._end_flag):
try:
while (self._pause_mode):
time.sleep(0.1)
# Read from the video file
ret_val, input_image = self._video_device.read()
if (not ret_val):
print("No image from video device, exiting")
break
self._network_processor.start_aysnc_inference(input_image)
except Exception:
                # Something went wrong while handing the frame to the neural
                # network processor; report it and re-raise so the caller can
                # see the failure.
print("Exception occurred writing to the neural network processor.")
raise
print('exiting video_processor worker thread for network processor')
def cleanup(self):
"""Should be called once for each class instance when the class consumer is finished with it.
:return: None
"""
# wait for worker thread to finish if it still exists
if (not(self._worker_thread is None)):
self._worker_thread.join()
self._worker_thread = None
self._video_device.release()
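# Illustrative usage sketch (not part of the original file): queue mode, where
# the caller drains frames while the worker thread keeps reading the video.
# The file name and queue size below are placeholders.
#
#     frame_queue = Queue(maxsize=10)
#     video_proc = VideoProcessor('input_video.mp4', output_queue=frame_queue)
#     video_proc.start_processing()
#     try:
#         frame = frame_queue.get(timeout=5.0)
#     finally:
#         video_proc.stop_processing()
#         video_proc.cleanup()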
|
the-stack_0_18037 | from collections import OrderedDict, defaultdict
from datetime import date
import pytest
import pytz
from bs4 import BeautifulSoup
from pytest import param as case
from urwid import Columns, Divider, Padding, Text
from zulipterminal.config.keys import keys_for_command, primary_key_for_command
from zulipterminal.config.symbols import (
QUOTED_TEXT_MARKER,
STATUS_ACTIVE,
STATUS_INACTIVE,
STREAM_TOPIC_SEPARATOR,
TIME_MENTION_MARKER,
)
from zulipterminal.helper import powerset
from zulipterminal.ui_tools.boxes import MessageBox
from zulipterminal.ui_tools.views import (
SIDE_PANELS_MOUSE_SCROLL_LINES,
LeftColumnView,
MessageView,
MiddleColumnView,
ModListWalker,
RightColumnView,
StreamsView,
StreamsViewDivider,
TabView,
TopicsView,
UsersView,
)
SUBDIR = "zulipterminal.ui_tools"
BOXES = SUBDIR + ".boxes"
VIEWS = SUBDIR + ".views"
MESSAGEVIEW = VIEWS + ".MessageView"
MIDCOLVIEW = VIEWS + ".MiddleColumnView"
SERVER_URL = "https://chat.zulip.zulip"
@pytest.fixture(params=[True, False], ids=["ignore_mouse_click", "handle_mouse_click"])
def compose_box_is_open(request):
return request.param
class TestModListWalker:
@pytest.fixture
def mod_walker(self):
return ModListWalker([list(range(1))])
@pytest.mark.parametrize(
"num_items, focus_position",
[
(5, 0),
(0, 0),
],
)
def test_extend(self, num_items, focus_position, mod_walker, mocker):
items = list(range(num_items))
mocker.patch.object(mod_walker, "_set_focus")
mod_walker.extend(items)
mod_walker._set_focus.assert_called_once_with(focus_position)
def test__set_focus(self, mod_walker, mocker):
mod_walker.read_message = mocker.Mock()
mod_walker._set_focus(0)
mod_walker.read_message.assert_called_once_with()
def test_set_focus(self, mod_walker, mocker):
mod_walker.read_message = mocker.Mock()
mod_walker.set_focus(0)
mod_walker.read_message.assert_called_once_with()
class TestMessageView:
@pytest.fixture(autouse=True)
def mock_external_classes(self, mocker):
self.model = mocker.MagicMock()
self.view = mocker.Mock()
self.urwid = mocker.patch(VIEWS + ".urwid")
@pytest.fixture
def msg_view(self, mocker, msg_box):
mocker.patch(MESSAGEVIEW + ".main_view", return_value=[msg_box])
mocker.patch(MESSAGEVIEW + ".read_message")
mocker.patch(MESSAGEVIEW + ".set_focus")
msg_view = MessageView(self.model, self.view)
msg_view.log = mocker.Mock()
msg_view.body = mocker.Mock()
return msg_view
def test_init(self, mocker, msg_view, msg_box):
assert msg_view.model == self.model
msg_view.set_focus.assert_called_once_with(0)
assert msg_view.old_loading is False
assert msg_view.new_loading is False
@pytest.mark.parametrize("narrow_focus_pos, focus_msg", [(set(), 1), (0, 0)])
def test_main_view(self, mocker, narrow_focus_pos, focus_msg):
mocker.patch(MESSAGEVIEW + ".read_message")
self.urwid.SimpleFocusListWalker.return_value = mocker.Mock()
mocker.patch(MESSAGEVIEW + ".set_focus")
msg_list = ["MSG1", "MSG2"]
mocker.patch(VIEWS + ".create_msg_box_list", return_value=msg_list)
self.model.get_focus_in_current_narrow.return_value = narrow_focus_pos
msg_view = MessageView(self.model, self.view)
assert msg_view.focus_msg == focus_msg
@pytest.mark.parametrize(
"messages_fetched",
[
{},
{201: "M1"},
OrderedDict([(201, "M1"), (202, "M2")]),
],
)
@pytest.mark.parametrize(
"ids_in_narrow",
[
set(),
{0}, # Shouldn't apply to empty log case?
],
)
def test_load_old_messages_empty_log(
self, mocker, msg_view, ids_in_narrow, messages_fetched
):
# Expand parameters to use in test
new_msg_ids = set(messages_fetched.keys())
new_msg_widgets = list(messages_fetched.values())
mocker.patch.object(
msg_view.model,
"get_message_ids_in_current_narrow",
side_effect=[ids_in_narrow, ids_in_narrow | new_msg_ids],
)
create_msg_box_list = mocker.patch(
VIEWS + ".create_msg_box_list", return_value=new_msg_widgets
)
# Specific to this version of the test
msg_view.log = []
msg_view.load_old_messages(0)
assert msg_view.old_loading is False
assert msg_view.log == list(messages_fetched.values()) # code vs orig
if messages_fetched:
create_msg_box_list.assert_called_once_with(msg_view.model, new_msg_ids)
self.model.controller.update_screen.assert_called_once_with()
else:
create_msg_box_list.assert_not_called()
self.model.controller.update_screen.assert_not_called()
self.model.get_messages.assert_called_once_with(
num_before=30, num_after=0, anchor=0
)
@pytest.mark.parametrize(
"messages_fetched",
[
{},
{201: "M1"},
OrderedDict([(201, "M1"), (202, "M2")]),
],
)
@pytest.mark.parametrize(
"top_id_in_narrow, other_ids_in_narrow",
[
(99, set()),
(99, {101}),
(99, {101, 103}),
],
)
def test_load_old_messages_mocked_log(
self, mocker, msg_view, top_id_in_narrow, other_ids_in_narrow, messages_fetched
):
# Expand parameters to use in test
new_msg_ids = set(messages_fetched.keys())
new_msg_widgets = list(messages_fetched.values())
# Parameter constraints
assert top_id_in_narrow not in other_ids_in_narrow
assert top_id_in_narrow not in new_msg_ids
assert other_ids_in_narrow & new_msg_ids == set()
top_widget = mocker.Mock()
top_widget.original_widget.message = {"id": top_id_in_narrow}
ids_in_narrow = {top_id_in_narrow} | other_ids_in_narrow
mocker.patch.object(
msg_view.model,
"get_message_ids_in_current_narrow",
side_effect=[ids_in_narrow, ids_in_narrow | new_msg_ids],
)
create_msg_box_list = mocker.patch(
VIEWS + ".create_msg_box_list",
return_value=(new_msg_widgets + [top_widget]),
)
initial_log = [top_widget] + len(other_ids_in_narrow) * ["existing"]
msg_view.log = initial_log[:]
msg_view.load_old_messages(0)
assert msg_view.old_loading is False
assert msg_view.log == new_msg_widgets + initial_log
if messages_fetched:
create_msg_box_list.assert_called_once_with(
msg_view.model, {top_id_in_narrow} | new_msg_ids
)
self.model.controller.update_screen.assert_called_once_with()
else:
create_msg_box_list.assert_not_called()
self.model.controller.update_screen.assert_not_called()
self.model.get_messages.assert_called_once_with(
num_before=30, num_after=0, anchor=0
)
# FIXME: Improve this test by covering more parameters
@pytest.mark.parametrize(
"ids_in_narrow",
[
({0}),
],
)
def test_load_new_messages_empty_log(self, mocker, msg_view, ids_in_narrow):
mocker.patch.object(
msg_view.model,
"get_message_ids_in_current_narrow",
return_value=ids_in_narrow,
)
create_msg_box_list = mocker.patch(
VIEWS + ".create_msg_box_list", return_value=["M1", "M2"]
)
msg_view.log = []
msg_view.load_new_messages(0)
assert msg_view.new_loading is False
assert msg_view.log == ["M1", "M2"]
create_msg_box_list.assert_called_once_with(
msg_view.model, set(), last_message=None
)
self.model.controller.update_screen.assert_called_once_with()
self.model.get_messages.assert_called_once_with(
num_before=0, num_after=30, anchor=0
)
# FIXME: Improve this test by covering more parameters
@pytest.mark.parametrize(
"ids_in_narrow",
[
({0}),
],
)
def test_load_new_messages_mocked_log(self, mocker, msg_view, ids_in_narrow):
mocker.patch.object(
msg_view.model,
"get_message_ids_in_current_narrow",
return_value=ids_in_narrow,
)
create_msg_box_list = mocker.patch(
VIEWS + ".create_msg_box_list", return_value=["M1", "M2"]
)
msg_view.log = [mocker.Mock()]
msg_view.load_new_messages(0)
assert msg_view.new_loading is False
assert msg_view.log[-2:] == ["M1", "M2"]
expected_last_msg = msg_view.log[0].original_widget.message
create_msg_box_list.assert_called_once_with(
msg_view.model, set(), last_message=expected_last_msg
)
self.model.controller.update_screen.assert_called_once_with()
self.model.get_messages.assert_called_once_with(
num_before=0, num_after=30, anchor=0
)
def test_mouse_event(self, mocker, msg_view, mouse_scroll_event, widget_size):
event, button, keypress = mouse_scroll_event
mocker.patch.object(msg_view, "keypress")
size = widget_size(msg_view)
msg_view.mouse_event(size, event, button, 0, 0, mocker.Mock())
msg_view.keypress.assert_called_once_with(size, keypress)
@pytest.mark.parametrize("key", keys_for_command("GO_DOWN"))
def test_keypress_GO_DOWN(self, mocker, msg_view, key, widget_size):
size = widget_size(msg_view)
msg_view.new_loading = False
mocker.patch(MESSAGEVIEW + ".focus_position", return_value=0)
mocker.patch(MESSAGEVIEW + ".set_focus_valign")
msg_view.log.next_position.return_value = 1
msg_view.keypress(size, key)
msg_view.log.next_position.assert_called_once_with(msg_view.focus_position)
msg_view.set_focus.assert_called_with(1, "above")
msg_view.set_focus_valign.assert_called_once_with("middle")
@pytest.mark.parametrize("view_is_focused", [True, False])
@pytest.mark.parametrize("key", keys_for_command("GO_DOWN"))
def test_keypress_GO_DOWN_exception(
self, mocker, msg_view, key, widget_size, view_is_focused
):
size = widget_size(msg_view)
msg_view.new_loading = False
mocker.patch(MESSAGEVIEW + ".focus_position", return_value=0)
mocker.patch(MESSAGEVIEW + ".set_focus_valign")
msg_view.log.next_position = Exception()
mocker.patch(
MESSAGEVIEW + ".focus",
mocker.MagicMock() if view_is_focused else None,
)
mocker.patch.object(msg_view, "load_new_messages")
return_value = msg_view.keypress(size, key)
if view_is_focused:
msg_view.load_new_messages.assert_called_once_with(
msg_view.focus.original_widget.message["id"],
)
else:
msg_view.load_new_messages.assert_not_called()
assert return_value == key
@pytest.mark.parametrize("key", keys_for_command("GO_UP"))
def test_keypress_GO_UP(self, mocker, msg_view, key, widget_size):
size = widget_size(msg_view)
mocker.patch(MESSAGEVIEW + ".focus_position", return_value=0)
mocker.patch(MESSAGEVIEW + ".set_focus_valign")
msg_view.old_loading = False
msg_view.log.prev_position.return_value = 1
msg_view.keypress(size, key)
msg_view.log.prev_position.assert_called_once_with(msg_view.focus_position)
msg_view.set_focus.assert_called_with(1, "below")
msg_view.set_focus_valign.assert_called_once_with("middle")
@pytest.mark.parametrize("view_is_focused", [True, False])
@pytest.mark.parametrize("key", keys_for_command("GO_UP"))
def test_keypress_GO_UP_exception(
self, mocker, msg_view, key, widget_size, view_is_focused
):
size = widget_size(msg_view)
msg_view.old_loading = False
mocker.patch(MESSAGEVIEW + ".focus_position", return_value=0)
mocker.patch(MESSAGEVIEW + ".set_focus_valign")
msg_view.log.prev_position = Exception()
mocker.patch(
MESSAGEVIEW + ".focus",
mocker.MagicMock() if view_is_focused else None,
)
mocker.patch.object(msg_view, "load_old_messages")
return_value = msg_view.keypress(size, key)
if view_is_focused:
msg_view.load_old_messages.assert_called_once_with(
msg_view.focus.original_widget.message["id"],
)
else:
msg_view.load_old_messages.assert_not_called()
assert return_value == key
def test_read_message(self, mocker, msg_box):
mocker.patch(MESSAGEVIEW + ".main_view", return_value=[msg_box])
self.urwid.SimpleFocusListWalker.return_value = mocker.Mock()
mocker.patch(MESSAGEVIEW + ".set_focus")
mocker.patch(MESSAGEVIEW + ".update_search_box_narrow")
msg_view = MessageView(self.model, self.view)
msg_view.model.is_search_narrow = lambda: False
msg_view.model.controller.in_explore_mode = False
msg_view.log = mocker.Mock()
msg_view.body = mocker.Mock()
msg_w = mocker.MagicMock()
msg_view.model.controller.view = mocker.Mock()
msg_view.model.controller.view.body.focus_col = 1
msg_w.attr_map = {None: "unread"}
msg_w.original_widget.message = {"id": 1}
msg_w.set_attr_map.return_value = None
msg_view.body.get_focus.return_value = (msg_w, 0)
msg_view.body.get_prev.return_value = (None, 1)
msg_view.model.narrow = []
msg_view.model.index = {
"messages": {
1: {
"flags": [],
}
},
"pointer": {"[]": 0},
}
mocker.patch(MESSAGEVIEW + ".focus_position")
msg_view.focus_position = 1
msg_view.model.controller.view.body.focus_col = 1
msg_view.log = list(msg_view.model.index["messages"])
msg_view.read_message()
assert msg_view.update_search_box_narrow.called
assert msg_view.model.index["messages"][1]["flags"] == ["read"]
self.model.mark_message_ids_as_read.assert_called_once_with([1])
def test_message_calls_search_and_header_bar(self, mocker, msg_view):
msg_w = mocker.MagicMock()
msg_w.original_widget.message = {"id": 1}
msg_view.update_search_box_narrow(msg_w.original_widget)
        msg_w.original_widget.top_header_bar.assert_called_once_with(
            msg_w.original_widget
        )
msg_w.original_widget.top_search_bar.assert_called_once_with()
def test_read_message_no_msgw(self, mocker, msg_view):
# MSG_W is NONE CASE
msg_view.body.get_focus.return_value = (None, 0)
msg_view.read_message()
self.model.mark_message_ids_as_read.assert_not_called()
def test_read_message_in_explore_mode(self, mocker, msg_box):
mocker.patch(MESSAGEVIEW + ".main_view", return_value=[msg_box])
mocker.patch(MESSAGEVIEW + ".set_focus")
mocker.patch(MESSAGEVIEW + ".update_search_box_narrow")
msg_view = MessageView(self.model, self.view)
msg_w = mocker.Mock()
msg_view.body = mocker.Mock()
msg_view.body.get_focus.return_value = (msg_w, 0)
msg_view.model.is_search_narrow = lambda: False
msg_view.model.controller.in_explore_mode = True
msg_view.read_message()
assert msg_view.update_search_box_narrow.called
assert not self.model.mark_message_ids_as_read.called
def test_read_message_search_narrow(self, mocker, msg_box):
mocker.patch(MESSAGEVIEW + ".main_view", return_value=[msg_box])
mocker.patch(MESSAGEVIEW + ".set_focus")
mocker.patch(MESSAGEVIEW + ".update_search_box_narrow")
msg_view = MessageView(self.model, self.view)
msg_view.model.controller.view = mocker.Mock()
msg_w = mocker.Mock()
msg_view.body = mocker.Mock()
msg_view.body.get_focus.return_value = (msg_w, 0)
msg_view.model.is_search_narrow = lambda: True
msg_view.model.controller.in_explore_mode = False
msg_view.read_message()
assert msg_view.update_search_box_narrow.called
assert not self.model.mark_message_ids_as_read.called
def test_read_message_last_unread_message_focused(
self, mocker, message_fixture, empty_index, msg_box
):
mocker.patch(MESSAGEVIEW + ".main_view", return_value=[msg_box])
mocker.patch(MESSAGEVIEW + ".set_focus")
msg_view = MessageView(self.model, self.view)
msg_view.model.is_search_narrow = lambda: False
msg_view.model.controller.in_explore_mode = False
msg_view.log = [0, 1]
msg_view.body = mocker.Mock()
msg_view.update_search_box_narrow = mocker.Mock()
self.model.controller.view = mocker.Mock()
self.model.controller.view.body.focus_col = 0
self.model.index = empty_index
msg_w = mocker.Mock()
msg_w.attr_map = {None: "unread"}
msg_w.original_widget.message = message_fixture
msg_view.body.get_focus.return_value = (msg_w, 1)
msg_view.body.get_prev.return_value = (None, 0)
msg_view.read_message(1)
self.model.mark_message_ids_as_read.assert_called_once_with(
[message_fixture["id"]]
)
class TestStreamsViewDivider:
def test_init(self):
streams_view_divider = StreamsViewDivider()
assert isinstance(streams_view_divider, Divider)
assert streams_view_divider.stream_id == -1
assert streams_view_divider.stream_name == ""
class TestStreamsView:
@pytest.fixture
def stream_view(self, mocker):
mocker.patch(VIEWS + ".threading.Lock")
self.view = mocker.Mock()
self.stream_search_box = mocker.patch(VIEWS + ".PanelSearchBox")
stream_btn = mocker.Mock()
stream_btn.stream_name = "FOO"
self.streams_btn_list = [stream_btn]
return StreamsView(self.streams_btn_list, view=self.view)
def test_init(self, mocker, stream_view):
assert stream_view.view == self.view
assert stream_view.streams_btn_list == self.streams_btn_list
assert stream_view.stream_search_box
self.stream_search_box.assert_called_once_with(
stream_view, "SEARCH_STREAMS", stream_view.update_streams
)
@pytest.mark.parametrize(
"new_text, expected_log, to_pin",
[
# NOTE: '' represents StreamsViewDivider's stream name.
("f", ["fan", "FOO", "foo", "FOOBAR"], []),
("bar", ["bar"], []),
("foo", ["FOO", "foo", "FOOBAR"], []),
("FOO", ["FOO", "foo", "FOOBAR"], []),
("test", ["test here"], []),
("here", ["test here"], []),
("test here", ["test here"], []),
# With 'foo' pinned.
("f", ["foo", "", "fan", "FOO", "FOOBAR"], ["foo"]),
("FOO", ["foo", "", "FOO", "FOOBAR"], ["foo"]),
# With 'bar' pinned.
("bar", ["bar"], ["bar"]),
("baar", "search error", []),
],
)
def test_update_streams(self, mocker, stream_view, new_text, expected_log, to_pin):
stream_names = ["FOO", "FOOBAR", "foo", "fan", "boo", "BOO", "bar", "test here"]
stream_names.sort(key=lambda stream_name: stream_name.lower())
self.view.pinned_streams = [{"name": name} for name in to_pin]
stream_names.sort(
key=lambda stream_name: stream_name in [stream for stream in to_pin],
reverse=True,
)
self.view.controller.is_in_editor_mode = lambda: True
search_box = stream_view.stream_search_box
stream_view.streams_btn_list = [
mocker.Mock(stream_name=stream_name) for stream_name in stream_names
]
stream_view.update_streams(search_box, new_text)
if expected_log != "search error":
assert [stream.stream_name for stream in stream_view.log] == expected_log
else:
assert hasattr(stream_view.log[0].original_widget, "text")
self.view.controller.update_screen.assert_called_once_with()
def test_mouse_event(self, mocker, stream_view, mouse_scroll_event, widget_size):
event, button, key = mouse_scroll_event
stream_view_keypress = mocker.patch.object(stream_view, "keypress")
size = widget_size(stream_view)
col = 1
row = 1
focus = "WIDGET"
stream_view.mouse_event(size, event, button, col, row, focus)
stream_view_keypress.assert_has_calls(
[mocker.call(size, key)] * SIDE_PANELS_MOUSE_SCROLL_LINES
)
@pytest.mark.parametrize("key", keys_for_command("SEARCH_STREAMS"))
def test_keypress_SEARCH_STREAMS(self, mocker, stream_view, key, widget_size):
size = widget_size(stream_view)
mocker.patch.object(stream_view, "set_focus")
mocker.patch.object(stream_view.stream_search_box, "set_caption")
stream_view.log.extend(["FOO", "foo", "fan", "boo", "BOO"])
stream_view.log.set_focus(3)
stream_view.keypress(size, key)
assert stream_view.focus_index_before_search == 3
stream_view.set_focus.assert_called_once_with("header")
stream_view.stream_search_box.set_caption.assert_called_once_with(" ")
self.view.controller.enter_editor_mode_with.assert_called_once_with(
stream_view.stream_search_box
)
@pytest.mark.parametrize("key", keys_for_command("GO_BACK"))
def test_keypress_GO_BACK(self, mocker, stream_view, key, widget_size):
size = widget_size(stream_view)
mocker.patch.object(stream_view, "set_focus")
mocker.patch(VIEWS + ".urwid.Frame.keypress")
mocker.patch.object(stream_view.stream_search_box, "reset_search_text")
stream_view.streams_btn_list = ["FOO", "foo", "fan", "boo", "BOO"]
stream_view.focus_index_before_search = 3
# Simulate search
stream_view.log.clear()
stream_view.log.extend(stream_view.streams_btn_list[3])
stream_view.log.set_focus(0)
stream_view.keypress(size, primary_key_for_command("GO_DOWN"))
assert stream_view.log.get_focus()[1] != stream_view.focus_index_before_search
# Exit search
stream_view.keypress(size, key)
# Check state reset after search
stream_view.set_focus.assert_called_once_with("body")
assert stream_view.stream_search_box.reset_search_text.called
assert stream_view.log == stream_view.streams_btn_list
assert stream_view.log.get_focus()[1] == stream_view.focus_index_before_search
class TestTopicsView:
@pytest.fixture
def topic_view(self, mocker, stream_button):
self.stream_button = stream_button
mocker.patch(VIEWS + ".threading.Lock")
self.topic_search_box = mocker.patch(VIEWS + ".PanelSearchBox")
self.view = mocker.Mock()
self.view.controller = mocker.Mock()
topic_btn = mocker.Mock()
topic_btn.caption = "BOO"
self.topics_btn_list = [topic_btn]
self.header_list = mocker.patch(VIEWS + ".urwid.Pile")
self.divider = mocker.patch(VIEWS + ".urwid.Divider")
return TopicsView(self.topics_btn_list, self.view, self.stream_button)
def test_init(self, mocker, topic_view):
assert topic_view.stream_button == self.stream_button
assert topic_view.view == self.view
assert topic_view.topic_search_box
self.topic_search_box.assert_called_once_with(
topic_view, "SEARCH_TOPICS", topic_view.update_topics
)
self.header_list.assert_called_once_with(
[topic_view.stream_button, self.divider("─"), topic_view.topic_search_box]
)
@pytest.mark.parametrize(
"new_text, expected_log",
[
("f", ["FOO", "FOOBAR", "foo", "fan"]),
("a", ["FOOBAR", "fan", "bar"]),
("bar", ["FOOBAR", "bar"]),
("foo", ["FOO", "FOOBAR", "foo"]),
("FOO", ["FOO", "FOOBAR", "foo"]),
("(no", ["(no topic)"]),
("topic", ["(no topic)"]),
("cc", "search error"),
],
)
def test_update_topics(self, mocker, topic_view, new_text, expected_log):
topic_names = ["FOO", "FOOBAR", "foo", "fan", "boo", "BOO", "bar", "(no topic)"]
self.view.controller.is_in_editor_mode = lambda: True
search_box = topic_view.topic_search_box
topic_view.topics_btn_list = [
mocker.Mock(topic_name=topic_name) for topic_name in topic_names
]
topic_view.update_topics(search_box, new_text)
if expected_log != "search error":
assert [topic.topic_name for topic in topic_view.log] == expected_log
else:
assert hasattr(topic_view.log[0].original_widget, "text")
self.view.controller.update_screen.assert_called_once_with()
@pytest.mark.parametrize(
"topic_name, topic_initial_log, topic_final_log",
[
("TOPIC3", ["TOPIC2", "TOPIC3", "TOPIC1"], ["TOPIC3", "TOPIC2", "TOPIC1"]),
("TOPIC1", ["TOPIC1", "TOPIC2", "TOPIC3"], ["TOPIC1", "TOPIC2", "TOPIC3"]),
(
"TOPIC4",
["TOPIC1", "TOPIC2", "TOPIC3"],
["TOPIC4", "TOPIC1", "TOPIC2", "TOPIC3"],
),
("TOPIC1", [], ["TOPIC1"]),
],
ids=[
"reorder_topic3",
"topic1_discussion_continues",
"new_topic4",
"first_topic_1",
],
)
def test_update_topics_list(
self, mocker, topic_view, topic_name, topic_initial_log, topic_final_log
):
mocker.patch(SUBDIR + ".buttons.TopButton.__init__", return_value=None)
set_focus_valign = mocker.patch(VIEWS + ".urwid.ListBox.set_focus_valign")
topic_view.view.controller.model.stream_dict = {86: {"name": "PTEST"}}
topic_view.view.controller.model.is_muted_topic = mocker.Mock(
return_value=False
)
topic_view.log = [
mocker.Mock(topic_name=topic_name) for topic_name in topic_initial_log
]
topic_view.update_topics_list(86, topic_name, 1001)
assert [topic.topic_name for topic in topic_view.log] == topic_final_log
set_focus_valign.assert_called_once_with("bottom")
@pytest.mark.parametrize("key", keys_for_command("SEARCH_TOPICS"))
def test_keypress_SEARCH_TOPICS(self, mocker, topic_view, key, widget_size):
size = widget_size(topic_view)
mocker.patch(VIEWS + ".TopicsView.set_focus")
mocker.patch.object(topic_view.topic_search_box, "set_caption")
topic_view.log.extend(["FOO", "foo", "fan", "boo", "BOO"])
topic_view.log.set_focus(3)
topic_view.keypress(size, key)
topic_view.set_focus.assert_called_once_with("header")
topic_view.header_list.set_focus.assert_called_once_with(2)
assert topic_view.focus_index_before_search == 3
topic_view.topic_search_box.set_caption.assert_called_once_with(" ")
self.view.controller.enter_editor_mode_with.assert_called_once_with(
topic_view.topic_search_box
)
@pytest.mark.parametrize("key", keys_for_command("GO_BACK"))
def test_keypress_GO_BACK(self, mocker, topic_view, key, widget_size):
size = widget_size(topic_view)
mocker.patch(VIEWS + ".TopicsView.set_focus")
mocker.patch(VIEWS + ".urwid.Frame.keypress")
mocker.patch.object(topic_view.topic_search_box, "reset_search_text")
topic_view.topics_btn_list = ["FOO", "foo", "fan", "boo", "BOO"]
topic_view.focus_index_before_search = 3
# Simulate search
topic_view.log.clear()
topic_view.log.extend(topic_view.topics_btn_list[3])
topic_view.log.set_focus(0)
topic_view.keypress(size, primary_key_for_command("GO_DOWN"))
assert topic_view.log.get_focus()[1] != topic_view.focus_index_before_search
# Exit search
topic_view.keypress(size, key)
# Check state reset after search
topic_view.set_focus.assert_called_once_with("body")
assert topic_view.topic_search_box.reset_search_text.called
assert topic_view.log == topic_view.topics_btn_list
assert topic_view.log.get_focus()[1] == topic_view.focus_index_before_search
def test_mouse_event(self, mocker, topic_view, mouse_scroll_event, widget_size):
event, button, key = mouse_scroll_event
topic_view_keypress = mocker.patch.object(topic_view, "keypress")
size = widget_size(topic_view)
col = 1
row = 1
focus = "WIDGET"
topic_view.mouse_event(size, event, button, col, row, focus)
topic_view_keypress.assert_has_calls(
[mocker.call(size, key)] * SIDE_PANELS_MOUSE_SCROLL_LINES
)
class TestUsersView:
@pytest.fixture
def user_view(self, mocker):
mocker.patch(VIEWS + ".urwid.SimpleFocusListWalker", return_value=[])
controller = mocker.Mock()
return UsersView(controller, "USER_BTN_LIST")
def test_mouse_event(self, mocker, user_view, mouse_scroll_event, widget_size):
event, button, key = mouse_scroll_event
user_view_keypress = mocker.patch.object(user_view, "keypress")
size = widget_size(user_view)
col = 1
row = 1
focus = "WIDGET"
user_view.mouse_event(size, event, button, col, row, focus)
user_view_keypress.assert_has_calls(
[mocker.call(size, key)] * SIDE_PANELS_MOUSE_SCROLL_LINES
)
def test_mouse_event_left_click(
self, mocker, user_view, widget_size, compose_box_is_open
):
super_mouse_event = mocker.patch(VIEWS + ".urwid.ListBox.mouse_event")
user_view.controller.is_in_editor_mode.return_value = compose_box_is_open
size = widget_size(user_view)
focus = mocker.Mock()
user_view.mouse_event(size, "mouse press", 1, 1, 1, focus)
if compose_box_is_open:
super_mouse_event.assert_not_called()
else:
super_mouse_event.assert_called_once_with(
size, "mouse press", 1, 1, 1, focus
)
@pytest.mark.parametrize(
"event, button",
[
("mouse release", 0),
("mouse press", 3),
("mouse release", 4),
],
ids=[
"unsupported_mouse_release_action",
"unsupported_right_click_mouse_press_action",
"invalid_event_button_combination",
],
)
def test_mouse_event_invalid(self, user_view, event, button, widget_size):
size = widget_size(user_view)
col = 1
row = 1
focus = "WIDGET"
return_value = user_view.mouse_event(size, event, button, col, row, focus)
assert return_value is False
class TestMiddleColumnView:
@pytest.fixture(autouse=True)
def mock_external_classes(self, mocker):
mocker.patch(MESSAGEVIEW + "", return_value="MSG_LIST")
self.model = mocker.Mock()
self.view = mocker.Mock()
self.write_box = mocker.Mock()
self.search_box = mocker.Mock()
self.super = mocker.patch(VIEWS + ".urwid.Frame.__init__")
self.super_keypress = mocker.patch(VIEWS + ".urwid.Frame.keypress")
        self.model.controller = mocker.Mock()
@pytest.fixture
def mid_col_view(self):
return MiddleColumnView(self.view, self.model, self.write_box, self.search_box)
def test_init(self, mid_col_view):
assert mid_col_view.model == self.model
assert mid_col_view.controller == self.model.controller
assert mid_col_view.last_unread_topic is None
assert mid_col_view.last_unread_pm is None
assert mid_col_view.search_box == self.search_box
assert self.view.message_view == "MSG_LIST"
self.super.assert_called_once_with(
"MSG_LIST", header=self.search_box, footer=self.write_box
)
def test_get_next_unread_topic(self, mid_col_view):
mid_col_view.model.unread_counts = {"unread_topics": {1: 1, 2: 1}}
return_value = mid_col_view.get_next_unread_topic()
assert return_value == 1
assert mid_col_view.last_unread_topic == 1
def test_get_next_unread_topic_again(self, mid_col_view):
mid_col_view.model.unread_counts = {"unread_topics": {1: 1, 2: 1}}
mid_col_view.last_unread_topic = 1
return_value = mid_col_view.get_next_unread_topic()
assert return_value == 2
assert mid_col_view.last_unread_topic == 2
def test_get_next_unread_topic_no_unread(self, mid_col_view):
mid_col_view.model.unread_counts = {"unread_topics": {}}
return_value = mid_col_view.get_next_unread_topic()
assert return_value is None
assert mid_col_view.last_unread_topic is None
def test_get_next_unread_pm(self, mid_col_view):
mid_col_view.model.unread_counts = {"unread_pms": {1: 1, 2: 1}}
return_value = mid_col_view.get_next_unread_pm()
assert return_value == 1
assert mid_col_view.last_unread_pm == 1
def test_get_next_unread_pm_again(self, mid_col_view):
mid_col_view.model.unread_counts = {"unread_pms": {1: 1, 2: 1}}
mid_col_view.last_unread_pm = 1
return_value = mid_col_view.get_next_unread_pm()
assert return_value == 2
assert mid_col_view.last_unread_pm == 2
def test_get_next_unread_pm_no_unread(self, mid_col_view):
mid_col_view.model.unread_counts = {"unread_pms": {}}
return_value = mid_col_view.get_next_unread_pm()
assert return_value is None
assert mid_col_view.last_unread_pm is None
@pytest.mark.parametrize("key", keys_for_command("GO_BACK"))
def test_keypress_GO_BACK(self, mid_col_view, mocker, key, widget_size):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".header")
mocker.patch(MIDCOLVIEW + ".footer")
mocker.patch(MIDCOLVIEW + ".set_focus")
mid_col_view.keypress(size, key)
mid_col_view.header.keypress.assert_called_once_with(size, key)
mid_col_view.footer.keypress.assert_called_once_with(size, key)
mid_col_view.set_focus.assert_called_once_with("body")
self.super_keypress.assert_called_once_with(size, key)
@pytest.mark.parametrize("key", keys_for_command("SEARCH_MESSAGES"))
def test_keypress_focus_header(self, mid_col_view, mocker, key, widget_size):
size = widget_size(mid_col_view)
mid_col_view.focus_part = "header"
mid_col_view.keypress(size, key)
self.super_keypress.assert_called_once_with(size, key)
@pytest.mark.parametrize("key", keys_for_command("SEARCH_MESSAGES"))
def test_keypress_SEARCH_MESSAGES(self, mid_col_view, mocker, key, widget_size):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".set_focus")
mid_col_view.keypress(size, key)
mid_col_view.controller.enter_editor_mode_with.assert_called_once_with(
mid_col_view.search_box
)
mid_col_view.set_focus.assert_called_once_with("header")
@pytest.mark.parametrize("reply_message_key", keys_for_command("REPLY_MESSAGE"))
def test_keypress_REPLY_MESSAGE(
self, mid_col_view, mocker, widget_size, reply_message_key
):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".body")
mocker.patch(MIDCOLVIEW + ".footer")
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".set_focus")
mid_col_view.keypress(size, reply_message_key)
mid_col_view.body.keypress.assert_called_once_with(size, reply_message_key)
mid_col_view.set_focus.assert_called_once_with("footer")
assert mid_col_view.footer.focus_position == 1
@pytest.mark.parametrize("key", keys_for_command("STREAM_MESSAGE"))
def test_keypress_STREAM_MESSAGE(self, mid_col_view, mocker, key, widget_size):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".body")
mocker.patch(MIDCOLVIEW + ".footer")
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".set_focus")
mid_col_view.keypress(size, key)
mid_col_view.body.keypress.assert_called_once_with(size, key)
mid_col_view.set_focus.assert_called_once_with("footer")
assert mid_col_view.footer.focus_position == 0
@pytest.mark.parametrize("key", keys_for_command("REPLY_AUTHOR"))
def test_keypress_REPLY_AUTHOR(self, mid_col_view, mocker, key, widget_size):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".body")
mocker.patch(MIDCOLVIEW + ".footer")
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".set_focus")
mid_col_view.keypress(size, key)
mid_col_view.body.keypress.assert_called_once_with(size, key)
mid_col_view.set_focus.assert_called_once_with("footer")
assert mid_col_view.footer.focus_position == 1
@pytest.mark.parametrize("key", keys_for_command("NEXT_UNREAD_TOPIC"))
def test_keypress_NEXT_UNREAD_TOPIC_stream(
self, mid_col_view, mocker, widget_size, key
):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(
MIDCOLVIEW + ".get_next_unread_topic",
return_value=("1", "topic"),
)
mid_col_view.model.stream_dict = {"1": {"name": "stream"}}
mid_col_view.keypress(size, key)
mid_col_view.get_next_unread_topic.assert_called_once_with()
mid_col_view.controller.narrow_to_topic.assert_called_once_with(
stream_name="stream", topic_name="topic"
)
@pytest.mark.parametrize("key", keys_for_command("NEXT_UNREAD_TOPIC"))
def test_keypress_NEXT_UNREAD_TOPIC_no_stream(
self, mid_col_view, mocker, widget_size, key
):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".get_next_unread_topic", return_value=None)
return_value = mid_col_view.keypress(size, key)
assert return_value == key
@pytest.mark.parametrize("key", keys_for_command("NEXT_UNREAD_PM"))
def test_keypress_NEXT_UNREAD_PM_stream(
self, mid_col_view, mocker, key, widget_size
):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".get_next_unread_pm", return_value=1)
mid_col_view.model.user_id_email_dict = {1: "EMAIL"}
mid_col_view.keypress(size, key)
mid_col_view.controller.narrow_to_user.assert_called_once_with(
recipient_emails=["EMAIL"],
contextual_message_id=1,
)
@pytest.mark.parametrize("key", keys_for_command("NEXT_UNREAD_PM"))
def test_keypress_NEXT_UNREAD_PM_no_pm(
self, mid_col_view, mocker, key, widget_size
):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".get_next_unread_pm", return_value=None)
return_value = mid_col_view.keypress(size, key)
assert return_value == key
@pytest.mark.parametrize("key", keys_for_command("PRIVATE_MESSAGE"))
def test_keypress_PRIVATE_MESSAGE(self, mid_col_view, mocker, key, widget_size):
size = widget_size(mid_col_view)
mocker.patch(MIDCOLVIEW + ".focus_position")
mocker.patch(MIDCOLVIEW + ".get_next_unread_pm", return_value=None)
mid_col_view.footer = mocker.Mock()
return_value = mid_col_view.keypress(size, key)
mid_col_view.footer.private_box_view.assert_called_once_with()
assert mid_col_view.footer.focus_position == 0
assert return_value == key
class TestRightColumnView:
@pytest.fixture(autouse=True)
def mock_external_classes(self, mocker):
self.view = mocker.Mock()
self.user_search = mocker.patch(VIEWS + ".PanelSearchBox")
self.connect_signal = mocker.patch(VIEWS + ".urwid.connect_signal")
self.line_box = mocker.patch(VIEWS + ".urwid.LineBox")
self.thread = mocker.patch(VIEWS + ".threading")
self.super = mocker.patch(VIEWS + ".urwid.Frame.__init__")
self.view.model.unread_counts = { # Minimal, though an UnreadCounts
"unread_pms": {
1: 1,
2: 1,
}
}
@pytest.fixture
def right_col_view(self, mocker):
mocker.patch(VIEWS + ".RightColumnView.users_view")
return RightColumnView(self.view)
def test_init(self, right_col_view):
assert right_col_view.view == self.view
assert right_col_view.user_search == self.user_search(right_col_view)
assert right_col_view.view.user_search == right_col_view.user_search
self.thread.Lock.assert_called_with()
assert right_col_view.search_lock == self.thread.Lock()
self.super.assert_called_once_with(
right_col_view.users_view(),
header=self.line_box(right_col_view.user_search),
)
def test_update_user_list_editor_mode(self, mocker, right_col_view):
right_col_view.view.controller.update_screen = mocker.Mock()
right_col_view.view.controller.is_in_editor_mode = lambda: False
right_col_view.update_user_list("SEARCH_BOX", "NEW_TEXT")
right_col_view.view.controller.update_screen.assert_not_called()
@pytest.mark.parametrize(
"search_string, assert_list, match_return_value",
[("U", ["USER1", "USER2"], True), ("F", [], False)],
ids=[
"user match",
"no user match",
],
)
def test_update_user_list(
self, right_col_view, mocker, search_string, assert_list, match_return_value
):
right_col_view.view.controller.is_in_editor_mode = lambda: True
self.view.users = ["USER1", "USER2"]
mocker.patch(VIEWS + ".match_user", return_value=match_return_value)
mocker.patch(VIEWS + ".UsersView")
list_w = mocker.patch(VIEWS + ".urwid.SimpleFocusListWalker")
set_body = mocker.patch(VIEWS + ".urwid.Frame.set_body")
right_col_view.update_user_list("SEARCH_BOX", search_string)
if assert_list:
right_col_view.users_view.assert_called_with(assert_list)
set_body.assert_called_once_with(right_col_view.body)
def test_update_user_presence(self, right_col_view, mocker, user_list):
set_body = mocker.patch(VIEWS + ".urwid.Frame.set_body")
right_col_view.update_user_list(user_list=user_list)
right_col_view.users_view.assert_called_with(user_list)
set_body.assert_called_once_with(right_col_view.body)
@pytest.mark.parametrize(
"users, users_btn_len, editor_mode, status",
[
(None, 1, False, "active"),
(
[
{
"user_id": 2,
"status": "inactive",
}
],
1,
True,
"active",
),
(None, 0, False, "inactive"),
],
)
def test_users_view(self, users, users_btn_len, editor_mode, status, mocker):
self.view.users = [{"user_id": 1, "status": status}]
self.view.controller.is_in_editor_mode = lambda: editor_mode
user_btn = mocker.patch(VIEWS + ".UserButton")
users_view = mocker.patch(VIEWS + ".UsersView")
right_col_view = RightColumnView(self.view)
if status != "inactive":
unread_counts = right_col_view.view.model.unread_counts
user_btn.assert_called_once_with(
user=self.view.users[0],
controller=self.view.controller,
view=self.view,
color="user_" + self.view.users[0]["status"],
state_marker=STATUS_ACTIVE,
count=1,
is_current_user=False,
)
users_view.assert_called_once_with(
self.view.controller, right_col_view.users_btn_list
)
assert len(right_col_view.users_btn_list) == users_btn_len
@pytest.mark.parametrize("key", keys_for_command("SEARCH_PEOPLE"))
def test_keypress_SEARCH_PEOPLE(self, right_col_view, mocker, key, widget_size):
size = widget_size(right_col_view)
mocker.patch(VIEWS + ".RightColumnView.set_focus")
mocker.patch.object(right_col_view.user_search, "set_caption")
right_col_view.keypress(size, key)
right_col_view.set_focus.assert_called_once_with("header")
right_col_view.user_search.set_caption.assert_called_once_with(" ")
self.view.controller.enter_editor_mode_with.assert_called_once_with(
right_col_view.user_search
)
@pytest.mark.parametrize("key", keys_for_command("GO_BACK"))
def test_keypress_GO_BACK(self, right_col_view, mocker, key, widget_size):
size = widget_size(right_col_view)
mocker.patch(VIEWS + ".UsersView")
mocker.patch(VIEWS + ".RightColumnView.set_focus")
mocker.patch(VIEWS + ".RightColumnView.set_body")
mocker.patch.object(right_col_view.user_search, "reset_search_text")
right_col_view.users_btn_list = []
right_col_view.keypress(size, key)
right_col_view.set_body.assert_called_once_with(right_col_view.body)
right_col_view.set_focus.assert_called_once_with("body")
assert right_col_view.user_search.reset_search_text.called
class TestLeftColumnView:
@pytest.fixture(autouse=True)
def mock_external_classes(self, mocker):
self.view = mocker.Mock()
self.view.model = mocker.Mock()
self.view.model.unread_counts = { # Minimal, though an UnreadCounts
"all_msg": 2,
"all_pms": 0,
"streams": {
86: 1,
14: 1,
99: 1,
1: 1,
2: 1,
1000: 1,
},
"unread_topics": {
(205, "TOPIC1"): 34,
(205, "TOPIC2"): 100,
},
"all_mentions": 1,
}
self.view.model.initial_data = {
"starred_messages": [1117554, 1117558, 1117574],
}
self.view.controller = mocker.Mock()
self.super_mock = mocker.patch(VIEWS + ".urwid.Pile.__init__")
def test_menu_view(self, mocker):
self.streams_view = mocker.patch(VIEWS + ".LeftColumnView.streams_view")
home_button = mocker.patch(VIEWS + ".HomeButton")
pm_button = mocker.patch(VIEWS + ".PMButton")
starred_button = mocker.patch(VIEWS + ".StarredButton")
mocker.patch(VIEWS + ".urwid.ListBox")
mocker.patch(VIEWS + ".urwid.SimpleFocusListWalker")
mocker.patch(VIEWS + ".StreamButton.mark_muted")
left_col_view = LeftColumnView(self.view)
home_button.assert_called_once_with(
controller=left_col_view.controller, count=2
)
pm_button.assert_called_once_with(controller=left_col_view.controller, count=0)
starred_button.assert_called_once_with(
controller=left_col_view.controller, count=3
)
@pytest.mark.parametrize("pinned", powerset([1, 2, 99, 1000]))
def test_streams_view(self, mocker, streams, pinned):
self.view.unpinned_streams = [s for s in streams if s["id"] not in pinned]
self.view.pinned_streams = [s for s in streams if s["id"] in pinned]
stream_button = mocker.patch(VIEWS + ".StreamButton")
stream_view = mocker.patch(VIEWS + ".StreamsView")
line_box = mocker.patch(VIEWS + ".urwid.LineBox")
divider = mocker.patch(VIEWS + ".StreamsViewDivider")
left_col_view = LeftColumnView(self.view)
if pinned:
assert divider.called
else:
divider.assert_not_called()
stream_button.assert_has_calls(
[
mocker.call(
properties=stream,
controller=self.view.controller,
view=self.view,
count=1,
)
for stream in (self.view.pinned_streams + self.view.unpinned_streams)
]
)
def test_topics_view(self, mocker, stream_button):
mocker.patch(VIEWS + ".LeftColumnView.streams_view")
mocker.patch(VIEWS + ".LeftColumnView.menu_view")
topic_button = mocker.patch(VIEWS + ".TopicButton")
topics_view = mocker.patch(VIEWS + ".TopicsView")
line_box = mocker.patch(VIEWS + ".urwid.LineBox")
topic_list = ["TOPIC1", "TOPIC2", "TOPIC3"]
unread_count_list = [34, 100, 0]
self.view.model.topics_in_stream = mocker.Mock(return_value=topic_list)
left_col_view = LeftColumnView(self.view)
left_col_view.topics_view(stream_button)
self.view.model.topics_in_stream.assert_called_once_with(205)
topic_button.assert_has_calls(
[
mocker.call(
stream_id=205,
topic=topic,
controller=self.view.controller,
view=self.view,
count=count,
)
for topic, count in zip(topic_list, unread_count_list)
]
)
class TestTabView:
@pytest.fixture
def tab_view(self):
return TabView("❰ TEST ❱")
@pytest.mark.parametrize(
"expected_output",
[
[
b" ",
b" \xe2\x9d\xb0 ",
b" ",
b" T ",
b" E ",
b" S ",
b" T ",
b" ",
b" \xe2\x9d\xb1 ",
b" ",
]
],
)
@pytest.mark.parametrize("TAB_WIDTH, TAB_HEIGHT", [(3, 10)])
def test_tab_render(self, tab_view, TAB_WIDTH, TAB_HEIGHT, expected_output):
render_output = tab_view._w.render((TAB_WIDTH, TAB_HEIGHT)).text
assert render_output == expected_output
class TestMessageBox:
@pytest.fixture(autouse=True)
def mock_external_classes(self, mocker, initial_index):
self.model = mocker.MagicMock()
self.model.index = initial_index
@pytest.mark.parametrize(
"message_type, set_fields",
[
("stream", [("stream_name", ""), ("stream_id", None), ("topic_name", "")]),
("private", [("email", ""), ("user_id", None)]),
],
)
def test_init(self, mocker, message_type, set_fields):
mocker.patch.object(MessageBox, "main_view")
message = dict(
display_recipient=[
{"id": 7, "email": "[email protected]", "full_name": "Boo is awesome"}
],
stream_id=5,
subject="hi",
sender_email="[email protected]",
sender_id=4209,
type=message_type,
)
msg_box = MessageBox(message, self.model, None)
assert msg_box.last_message == defaultdict(dict)
for field, invalid_default in set_fields:
assert getattr(msg_box, field) != invalid_default
if message_type == "stream":
assert msg_box.topic_links == OrderedDict()
assert msg_box.message_links == OrderedDict()
assert msg_box.time_mentions == list()
def test_init_fails_with_bad_message_type(self):
message = dict(type="BLAH")
with pytest.raises(RuntimeError):
msg_box = MessageBox(message, self.model, None)
def test_private_message_to_self(self, mocker):
message = dict(
type="private",
display_recipient=[
{"full_name": "Foo Foo", "email": "[email protected]", "id": None}
],
sender_id=9,
content="<p> self message. </p>",
sender_full_name="Foo Foo",
sender_email="[email protected]",
timestamp=150989984,
)
self.model.user_email = "[email protected]"
mocker.patch(
BOXES + ".MessageBox._is_private_message_to_self", return_value=True
)
mocker.patch.object(MessageBox, "main_view")
msg_box = MessageBox(message, self.model, None)
assert msg_box.recipient_emails == ["[email protected]"]
msg_box._is_private_message_to_self.assert_called_once_with()
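    # Each case below maps a snippet of Zulip-rendered HTML to the urwid markup
    # that MessageBox.soup2markup is expected to produce for it.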
@pytest.mark.parametrize(
"content, expected_markup",
[
case("", [], id="empty"),
case("<p>hi</p>", ["", "hi"], id="p"),
case(
'<span class="user-mention">@Bob Smith',
[("msg_mention", "@Bob Smith")],
id="user-mention",
),
case(
'<span class="user-group-mention">@A Group',
[("msg_mention", "@A Group")],
id="group-mention",
),
case("<code>some code", [("msg_code", "some code")], id="code"),
case(
'<div class="codehilite" data-code-language="python">'
"<pre><span></span>"
"<code><span>def</span> <span>func</span><span>():</span>\n"
' <span class="pg">print</span><span>()</span><span></span>\n'
"\n"
"<span>class</span> <span>New</span><span>:</span>\n"
' <span>name</span> <span>=</span> <span>"name"</span>\n'
"</code></pre></div>",
[
("pygments:w", "def"),
("pygments:w", " "),
("pygments:w", "func"),
("pygments:w", "():"),
("pygments:w", "\n" " "),
("pygments:pg", "print"),
("pygments:w", "()"),
("pygments:w", "\n" "\n"),
("pygments:w", "class"),
("pygments:w", " "),
("pygments:w", "New"),
("pygments:w", ":"),
("pygments:w", "\n" " "),
("pygments:w", "name"),
("pygments:w", " "),
("pygments:w", "="),
("pygments:w", " "),
("pygments:w", '"name"'),
("pygments:w", "\n"),
],
id="codehilite-code",
),
case(
'<div class="codehilite" data-code-language="python">'
"<pre><span></span>"
"<span>def</span> <span>func</span><span>():</span>\n"
' <span class="pg">print</span><span>()</span>\n'
"\n"
"<span>class</span> <span>New</span><span>:</span>\n"
' <span>name</span> <span>=</span> <span>"name"</span>\n'
"</pre></div>",
[
("pygments:w", "def"),
("pygments:w", " "),
("pygments:w", "func"),
("pygments:w", "():"),
("pygments:w", "\n" " "),
("pygments:pg", "print"),
("pygments:w", "()"),
("pygments:w", "\n" "\n"),
("pygments:w", "class"),
("pygments:w", " "),
("pygments:w", "New"),
("pygments:w", ":"),
("pygments:w", "\n" " "),
("pygments:w", "name"),
("pygments:w", " "),
("pygments:w", "="),
("pygments:w", " "),
("pygments:w", '"name"'),
("pygments:w", "\n"),
],
id="codehilite-code-old",
),
case(
'<div class="codehilite">'
"<pre><span></span>"
"<code>This is a\n"
" Plain\n"
"\n"
" Codeblock\n"
"</code></pre></div>",
[
("pygments:w", "This is a\n Plain\n\n Codeblock\n"),
],
id="codehilite-plain-text-codeblock",
),
case(
'<div class="codehilite">'
"<pre><span></span>"
"This is a\n"
" Plain\n"
"\n"
" Codeblock\n"
"</pre></div>",
[
("pygments:w", "This is a\n Plain\n\n Codeblock\n"),
],
id="codehilite-plain-text-codeblock-old",
),
case("<strong>Something", [("msg_bold", "Something")], id="strong"),
case("<em>Something", [("msg_bold", "Something")], id="em"),
case("<blockquote>stuff", [("msg_quote", ["", "stuff"])], id="blockquote"),
# FIXME Unsupported:
case(
'<div class="message_embed">',
["[EMBEDDED CONTENT NOT RENDERED]"],
id="embedded_content",
),
# TODO: Generate test cases to work with both soup2markup and
# footlinks_view.
case(
'<a href="http://foo">Foo</a><a href="https://bar.org">Bar</a>',
[
("msg_link", "Foo"),
" ",
("msg_link_index", "[1]"),
("msg_link", "Bar"),
" ",
("msg_link_index", "[2]"),
],
id="link_two",
),
case(
'<a href="http://foo">Foo</a><a href="http://foo">Another foo</a>',
[
("msg_link", "Foo"),
" ",
("msg_link_index", "[1]"),
("msg_link", "Another foo"),
" ",
("msg_link_index", "[1]"),
],
id="link_samelinkdifferentname",
),
case(
'<a href="http://foo">Foo</a><a href="https://bar.org">Bar</a>'
'<a href="http://foo">Foo</a><a href="https://bar.org">Bar</a>',
[
("msg_link", "Foo"),
" ",
("msg_link_index", "[1]"),
("msg_link", "Bar"),
" ",
("msg_link_index", "[2]"),
("msg_link", "Foo"),
" ",
("msg_link_index", "[1]"),
("msg_link", "Bar"),
" ",
("msg_link_index", "[2]"),
],
id="link_duplicatelink",
),
case(
'<a href="http://baz.com/">http://baz.com/</a>',
[("msg_link", "http://baz.com"), " ", ("msg_link_index", "[1]")],
id="link_trailingslash",
),
case(
'<a href="http://foo.com/">Foo</a><a href="http://foo.com">Foo</a>',
[
("msg_link", "Foo"),
" ",
("msg_link_index", "[1]"),
("msg_link", "Foo"),
" ",
("msg_link_index", "[1]"),
],
id="link_trailingslashduplicatelink",
),
case(
'<a href="http://foo">http://foo</a>',
[("msg_link", "http://foo"), " ", ("msg_link_index", "[1]")],
id="link_sametext",
),
case(
'<a href="http://foo/bar.png">http://foo/bar.png</a>',
[("msg_link", "bar.png"), " ", ("msg_link_index", "[1]")],
id="link_sameimage",
),
case(
'<a href="http://foo">bar</a>',
[("msg_link", "bar"), " ", ("msg_link_index", "[1]")],
id="link_differenttext",
),
case(
'<a href="/user_uploads/blah.gif"',
[("msg_link", "blah.gif"), " ", ("msg_link_index", "[1]")],
id="link_userupload",
),
case(
'<a href="/api"',
[("msg_link", "/api"), " ", ("msg_link_index", "[1]")],
id="link_api",
),
case(
f'<a href="some/relative_url">{SERVER_URL}/some/relative_url</a>',
[("msg_link", "/some/relative_url"), " ", ("msg_link_index", "[1]")],
id="link_serverrelative_same",
),
case(
'<a href="http://foo.com/bar">foo.com/bar</a>',
[("msg_link", "foo.com"), " ", ("msg_link_index", "[1]")],
id="link_textwithoutscheme",
),
case(
'<a href="http://foo.com">foo.com</a>'
'<a href="http://foo.com">http://foo.com</a>'
'<a href="https://foo.com">https://foo.com</a>'
'<a href="http://foo.com">Text</a>',
[
("msg_link", "foo.com"),
" ",
("msg_link_index", "[1]"),
("msg_link", "http://foo.com"),
" ",
("msg_link_index", "[1]"),
("msg_link", "https://foo.com"),
" ",
("msg_link_index", "[2]"),
("msg_link", "Text"),
" ",
("msg_link_index", "[1]"),
],
id="link_differentscheme",
),
case("<li>Something", ["\n", " \N{BULLET} ", "", "Something"], id="li"),
case("<li></li>", ["\n", " \N{BULLET} ", ""], id="empty_li"),
case(
"<li>\n<p>Something",
["\n", " \N{BULLET} ", "", "", "", "Something"],
id="li_with_li_p_newline",
),
case(
"<li>Something<li>else",
[
"\n",
" \N{BULLET} ",
"",
"Something",
"\n",
" \N{BULLET} ",
"",
"else",
],
id="two_li",
),
case(
"<li>\n<p>Something</p>\n</li><li>else",
[
"\n",
" \N{BULLET} ",
"",
"",
"",
"Something",
"",
"\n",
" \N{BULLET} ",
"",
"else",
],
id="two_li_with_li_p_newlines",
),
case(
"<ul><li>Something<ul><li>nested",
[
"",
" \N{BULLET} ",
"",
"Something",
"",
"\n",
" \N{RING OPERATOR} ",
"",
"nested",
],
id="li_nested",
),
case(
"<ul><li>Something<ul><li>nested<ul><li>a<ul><li>lot",
[
"",
" \N{BULLET} ",
"",
"Something",
"",
"\n",
" \N{RING OPERATOR} ",
"",
"nested",
"",
"\n",
" \N{HYPHEN} ",
"",
"a",
"",
"\n",
" \N{BULLET} ",
"",
"lot",
],
id="li_heavily_nested",
),
case("<br>", [], id="br"),
case("<br/>", [], id="br2"),
case("<hr>", ["[RULER NOT RENDERED]"], id="hr"),
case("<hr/>", ["[RULER NOT RENDERED]"], id="hr2"),
case("<img>", ["[IMAGE NOT RENDERED]"], id="img"),
case("<img/>", ["[IMAGE NOT RENDERED]"], id="img2"),
case(
"<table><thead><tr><th>Firstname</th><th>Lastname</th></tr></thead>"
"<tbody><tr><td>John</td><td>Doe</td></tr><tr><td>Mary</td><td>Moe"
"</td></tr></tbody></table>",
[
"┌─",
"─────────",
"─┬─",
"────────",
"─┐\n",
"│ ",
("table_head", "Firstname"),
" │ ",
("table_head", "Lastname"),
" │\n",
"├─",
"─────────",
"─┼─",
"────────",
"─┤\n",
"│ ",
(None, "John "),
" │ ",
(None, "Doe "),
" │\n",
"│ ",
(None, "Mary "),
" │ ",
(None, "Moe "),
" │\n",
"└─",
"─────────",
"─┴─",
"────────",
"─┘",
],
id="table_default",
),
case(
'<table><thead><tr><th align="left">Name</th><th align="right">Id'
'</th></tr></thead><tbody><tr><td align="left">Robert</td>'
'<td align="right">1</td></tr><tr><td align="left">Mary</td>'
'<td align="right">100</td></tr></tbody></table>',
[
"┌─",
"──────",
"─┬─",
"───",
"─┐\n",
"│ ",
("table_head", "Name "),
" │ ",
("table_head", " Id"),
" │\n",
"├─",
"──────",
"─┼─",
"───",
"─┤\n",
"│ ",
(None, "Robert"),
" │ ",
(None, " 1"),
" │\n",
"│ ",
(None, "Mary "),
" │ ",
(None, "100"),
" │\n",
"└─",
"──────",
"─┴─",
"───",
"─┘",
],
id="table_with_left_and_right_alignments",
),
case(
'<table><thead><tr><th align="center">Name</th><th align="right">Id'
'</th></tr></thead><tbody><tr><td align="center">Robert</td>'
'<td align="right">1</td></tr><tr><td align="center">Mary</td>'
'<td align="right">100</td></tr></tbody></table>',
[
"┌─",
"──────",
"─┬─",
"───",
"─┐\n",
"│ ",
("table_head", " Name "),
" │ ",
("table_head", " Id"),
" │\n",
"├─",
"──────",
"─┼─",
"───",
"─┤\n",
"│ ",
(None, "Robert"),
" │ ",
(None, " 1"),
" │\n",
"│ ",
(None, " Mary "),
" │ ",
(None, "100"),
" │\n",
"└─",
"──────",
"─┴─",
"───",
"─┘",
],
id="table_with_center_and_right_alignments",
),
case(
"<table><thead><tr><th>Name</th></tr></thead><tbody><tr><td>Foo</td>"
"</tr><tr><td>Bar</td></tr><tr><td>Baz</td></tr></tbody></table>",
[
"┌─",
"────",
"─┐\n",
"│ ",
("table_head", "Name"),
" │\n",
"├─",
"────",
"─┤\n",
"│ ",
(None, "Foo "),
" │\n",
"│ ",
(None, "Bar "),
" │\n",
"│ ",
(None, "Baz "),
" │\n",
"└─",
"────",
"─┘",
],
id="table_with_single_column",
),
case(
"<table><thead><tr><th>Column1</th></tr></thead><tbody><tr><td></td>"
"</tr></tbody></table>",
[
"┌─",
"───────",
"─┐\n",
"│ ",
("table_head", "Column1"),
" │\n",
"├─",
"───────",
"─┤\n",
"│ ",
(None, " "),
" │\n",
"└─",
"───────",
"─┘",
],
id="table_with_the_bare_minimum",
),
case(
'<time datetime="2020-08-07T04:30:00Z"> Fri, Aug 7 2020, 10:00AM IST'
"</time>",
[("msg_time", f" {TIME_MENTION_MARKER} Fri, Aug 7 2020, 10:00 (IST) ")],
id="time_human_readable_input",
),
case(
'<time datetime="2020-08-11T16:32:58Z"> 1597163578</time>',
[
(
"msg_time",
f" {TIME_MENTION_MARKER} Tue, Aug 11 2020, 22:02 (IST) ",
)
],
id="time_UNIX_timestamp_input",
),
case(
# Markdown:
# ```math
# some-math
# ```
'<span class="katex-display"><span class="katex"><semantics>'
"<annotation>some-math</annotation></semantics></span></span>",
[("msg_math", "some-math")],
id="katex_HTML_response_math_fenced_markdown",
),
case(
# Markdown:
# $$ some-math $$
'<span class="katex"><semantics><annotation>some-math</annotation>'
"</semantics></span>",
[("msg_math", "some-math")],
id="katex_HTML_response_double_$_fenced_markdown",
),
case("<ul><li>text</li></ul>", ["", " \N{BULLET} ", "", "text"], id="ul"),
case(
"<ul>\n<li>text</li>\n</ul>",
["", "", " \N{BULLET} ", "", "text", ""],
id="ul_with_ul_li_newlines",
),
case("<ol><li>text</li></ol>", ["", " 1. ", "", "text"], id="ol"),
case(
"<ol>\n<li>text</li>\n</ol>",
["", "", " 1. ", "", "text", ""],
id="ol_with_ol_li_newlines",
),
case(
'<ol start="5"><li>text</li></ol>',
["", " 5. ", "", "text"],
id="ol_starting_at_5",
),
# FIXME Strikethrough
case("<del>text</del>", ["", "text"], id="strikethrough_del"),
# FIXME inline image?
case(
'<div class="message_inline_image">'
'<a href="x"><img src="x"></a></div>',
[],
id="inline_image",
),
# FIXME inline ref?
case('<div class="message_inline_ref">blah</div>', [], id="inline_ref"),
case(
'<span class="emoji">:smile:</span>',
[("msg_emoji", ":smile:")],
id="emoji",
),
case(
'<div class="inline-preview-twitter"',
["[TWITTER PREVIEW NOT RENDERED]"],
id="preview-twitter",
),
case(
'<img class="emoji" title="zulip"/>',
[("msg_emoji", ":zulip:")],
id="zulip_extra_emoji",
),
case(
'<img class="emoji" title="github"/>',
[("msg_emoji", ":github:")],
id="custom_emoji",
),
],
)
def test_soup2markup(self, content, expected_markup, mocker):
mocker.patch(
BOXES + ".get_localzone", return_value=pytz.timezone("Asia/Kolkata")
)
soup = BeautifulSoup(content, "lxml").find(name="body")
metadata = dict(
server_url=SERVER_URL,
message_links=OrderedDict(),
time_mentions=list(),
bq_len=0,
)
markup, *_ = MessageBox.soup2markup(soup, metadata)
assert markup == [""] + expected_markup
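    # Smoke test: constructing MessageBox with complete stream and private
    # message payloads should not raise.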
@pytest.mark.parametrize(
"message, last_message",
[
(
{
"sender_id": 1,
"display_recipient": "Verona",
"sender_full_name": "aaron",
"submessages": [],
"stream_id": 5,
"subject": "Verona2",
"id": 37,
"subject_links": [],
"content": (
"<p>It's nice and it feels more modern, but I think"
" this will take some time to get used to</p>"
),
"timestamp": 1531716583,
"sender_realm_str": "zulip",
"client": "populate_db",
"content_type": "text/html",
"reactions": [],
"type": "stream",
"is_me_message": False,
"flags": ["read"],
"sender_email": "[email protected]",
},
None,
),
(
{
"sender_id": 5,
"display_recipient": [
{
"is_mirror_dummy": False,
"email": "[email protected]",
"id": 1,
"full_name": "aaron",
},
{
"is_mirror_dummy": False,
"email": "[email protected]",
"id": 5,
"full_name": "Iago",
},
],
"sender_full_name": "Iago",
"submessages": [],
"subject": "",
"id": 107,
"subject_links": [],
"content": "<p>what are you planning to do this week</p>",
"timestamp": 1532103879,
"sender_realm_str": "zulip",
"client": "ZulipTerminal",
"content_type": "text/html",
"reactions": [],
"type": "private",
"is_me_message": False,
"flags": ["read"],
"sender_email": "[email protected]",
},
None,
),
],
)
def test_main_view(self, mocker, message, last_message):
self.model.stream_dict = {
5: {
"color": "#bd6",
},
}
msg_box = MessageBox(message, self.model, last_message)
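    # For "/me" messages the sender's name is embedded at the start of the
    # rendered content; for ordinary messages it is absent from the content.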
@pytest.mark.parametrize(
"message",
[
{
"id": 4,
"type": "stream",
"display_recipient": "Verona",
"stream_id": 5,
"subject": "Test topic",
"is_me_message": True, # will be overridden by test function.
"flags": [],
"content": "", # will be overridden by test function.
"reactions": [],
"sender_full_name": "Alice",
"timestamp": 1532103879,
}
],
)
@pytest.mark.parametrize(
"content, is_me_message",
[
("<p>/me is excited!</p>", True),
("<p>/me is excited! /me is not excited.</p>", True),
("<p>This is /me not.</p>", False),
("<p>/me is excited!</p>", False),
],
)
def test_main_view_renders_slash_me(self, mocker, message, content, is_me_message):
mocker.patch(BOXES + ".urwid.Text")
message["content"] = content
message["is_me_message"] = is_me_message
msg_box = MessageBox(message, self.model, message)
msg_box.main_view()
        name_index = 11 if is_me_message else -1  # 11 = len("<p><strong>")
assert (
msg_box.message["content"].find(message["sender_full_name"]) == name_index
)
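    # A stream/topic recipient header is prepended when the previous message was
    # in a different stream or topic, or was a private message.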
@pytest.mark.parametrize(
"message",
[
{
"id": 4,
"type": "stream",
"display_recipient": "Verona",
"stream_id": 5,
"subject": "Test topic",
"flags": [],
"is_me_message": False,
"content": "<p>what are you planning to do this week</p>",
"reactions": [],
"sender_full_name": "Alice",
"timestamp": 1532103879,
}
],
)
@pytest.mark.parametrize(
"to_vary_in_last_message",
[
{"display_recipient": "Verona offtopic"},
{"subject": "Test topic (previous)"},
{"type": "private"},
],
ids=[
"different_stream_before",
"different_topic_before",
"PM_before",
],
)
def test_main_view_generates_stream_header(
self, mocker, message, to_vary_in_last_message
):
self.model.stream_dict = {
5: {
"color": "#bd6",
},
}
last_message = dict(message, **to_vary_in_last_message)
msg_box = MessageBox(message, self.model, last_message)
view_components = msg_box.main_view()
assert len(view_components) == 3
assert isinstance(view_components[0], Columns)
assert isinstance(view_components[0][0], Text)
assert isinstance(view_components[0][1], Text)
assert isinstance(view_components[0][2], Divider)
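    # A private-message recipient header is prepended when the previous message
    # had a different recipient group or was a stream message.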
@pytest.mark.parametrize(
"message",
[
{
"id": 4,
"type": "private",
"sender_email": "[email protected]",
"sender_id": 5,
"display_recipient": [
{"email": "[email protected]", "id": 1, "full_name": "aaron"},
{"email": "[email protected]", "id": 5, "full_name": "Iago"},
],
"flags": [],
"is_me_message": False,
"content": "<p>what are you planning to do this week</p>",
"reactions": [],
"sender_full_name": "Alice",
"timestamp": 1532103879,
},
],
)
@pytest.mark.parametrize(
"to_vary_in_last_message",
[
{
"display_recipient": [
{"email": "[email protected]", "id": 1, "full_name": "aaron"},
{"email": "[email protected]", "id": 5, "full_name": "Iago"},
{"email": "[email protected]", "id": 6, "full_name": "Someone Else"},
],
},
{"type": "stream"},
],
ids=[
"larger_pm_group",
"stream_before",
],
)
def test_main_view_generates_PM_header(
self, mocker, message, to_vary_in_last_message
):
last_message = dict(message, **to_vary_in_last_message)
msg_box = MessageBox(message, self.model, last_message)
view_components = msg_box.main_view()
assert len(view_components) == 3
assert isinstance(view_components[0], Columns)
assert isinstance(view_components[0][0], Text)
assert isinstance(view_components[0][1], Text)
assert isinstance(view_components[0][2], Divider)
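    # msg_type selects a message from the fixture response: index 0 is a stream
    # message, indices 1 and 2 are private messages.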
@pytest.mark.parametrize(
"msg_narrow, msg_type, assert_header_bar, assert_search_bar",
[
([], 0, f"PTEST {STREAM_TOPIC_SEPARATOR} ", "All messages"),
([], 1, "You and ", "All messages"),
([], 2, "You and ", "All messages"),
(
[["stream", "PTEST"]],
0,
f"PTEST {STREAM_TOPIC_SEPARATOR} ",
("bar", [("s#bd6", "PTEST")]),
),
(
[["stream", "PTEST"], ["topic", "b"]],
0,
f"PTEST {STREAM_TOPIC_SEPARATOR}",
("bar", [("s#bd6", "PTEST"), ("s#bd6", ": topic narrow")]),
),
([["is", "private"]], 1, "You and ", "All private messages"),
([["is", "private"]], 2, "You and ", "All private messages"),
([["pm_with", "[email protected]"]], 1, "You and ", "Private conversation"),
(
[["pm_with", "[email protected], [email protected]"]],
2,
"You and ",
"Group private conversation",
),
(
[["is", "starred"]],
0,
f"PTEST {STREAM_TOPIC_SEPARATOR} ",
"Starred messages",
),
([["is", "starred"]], 1, "You and ", "Starred messages"),
([["is", "starred"]], 2, "You and ", "Starred messages"),
([["is", "starred"], ["search", "FOO"]], 1, "You and ", "Starred messages"),
(
[["search", "FOO"]],
0,
f"PTEST {STREAM_TOPIC_SEPARATOR} ",
"All messages",
),
([["is", "mentioned"]], 0, f"PTEST {STREAM_TOPIC_SEPARATOR} ", "Mentions"),
([["is", "mentioned"]], 1, "You and ", "Mentions"),
([["is", "mentioned"]], 2, "You and ", "Mentions"),
([["is", "mentioned"], ["search", "FOO"]], 1, "You and ", "Mentions"),
],
)
def test_msg_generates_search_and_header_bar(
self,
mocker,
messages_successful_response,
msg_type,
msg_narrow,
assert_header_bar,
assert_search_bar,
):
self.model.stream_dict = {
205: {
"color": "#bd6",
},
}
self.model.narrow = msg_narrow
messages = messages_successful_response["messages"]
current_message = messages[msg_type]
msg_box = MessageBox(current_message, self.model, messages[0])
search_bar = msg_box.top_search_bar()
header_bar = msg_box.top_header_bar(msg_box)
assert header_bar[0].text.startswith(assert_header_bar)
assert search_bar.text_to_fill == assert_search_bar
# Assume recipient (PM/stream/topic) header is unchanged below
@pytest.mark.parametrize(
"message",
[
{
"id": 4,
"type": "stream",
"display_recipient": "Verona",
"stream_id": 5,
"subject": "Test topic",
"flags": [],
"is_me_message": False,
"content": "<p>what are you planning to do this week</p>",
"reactions": [],
"sender_full_name": "alice",
"timestamp": 1532103879,
}
],
)
@pytest.mark.parametrize(
"current_year", [2018, 2019, 2050], ids=["now_2018", "now_2019", "now_2050"]
)
@pytest.mark.parametrize(
"starred_msg",
["this", "last", "neither"],
ids=["this_starred", "last_starred", "no_stars"],
)
@pytest.mark.parametrize(
"expected_header, to_vary_in_last_message",
[
(
[STATUS_INACTIVE, "alice", " ", "DAYDATETIME"],
{"sender_full_name": "bob"},
),
([" ", " ", " ", "DAYDATETIME"], {"timestamp": 1532103779}),
([STATUS_INACTIVE, "alice", " ", "DAYDATETIME"], {"timestamp": 0}),
],
ids=[
"show_author_as_authors_different",
"merge_messages_as_only_slightly_earlier_message",
"dont_merge_messages_as_much_earlier_message",
],
)
def test_main_view_content_header_without_header(
self,
mocker,
message,
expected_header,
current_year,
starred_msg,
to_vary_in_last_message,
):
mocked_date = mocker.patch(BOXES + ".date")
mocked_date.today.return_value = date(current_year, 1, 1)
mocked_date.side_effect = lambda *args, **kw: date(*args, **kw)
output_date_time = "Fri Jul 20 21:54" # corresponding to timestamp
self.model.formatted_local_time.side_effect = [ # for this- and last-message
output_date_time,
" ",
] * 2 # called once in __init__ and then in main_view explicitly
# The empty dict is responsible for INACTIVE status of test user.
self.model.user_dict = {} # called once in main_view explicitly
stars = {
msg: ({"flags": ["starred"]} if msg == starred_msg else {})
for msg in ("this", "last")
}
this_msg = dict(message, **stars["this"])
all_to_vary = dict(to_vary_in_last_message, **stars["last"])
last_msg = dict(message, **all_to_vary)
msg_box = MessageBox(this_msg, self.model, last_msg)
expected_header[2] = output_date_time
if current_year > 2018:
expected_header[2] = "2018 - " + expected_header[2]
expected_header[3] = "*" if starred_msg == "this" else " "
view_components = msg_box.main_view()
assert len(view_components) == 2
assert isinstance(view_components[0], Columns)
assert [w.text for w in view_components[0].widget_list] == expected_header
assert isinstance(view_components[1], Padding)
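    # When the neighbouring message shares the same header context, main_view
    # returns only the padded message body (compact output, no header row).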
@pytest.mark.parametrize(
"to_vary_in_each_message",
[
{"sender_full_name": "bob"},
{"timestamp": 1532103779},
{"timestamp": 0},
{},
{"flags": ["starred"]},
],
ids=[
"common_author",
"common_timestamp",
"common_early_timestamp",
"common_unchanged_message",
"both_starred",
],
)
def test_main_view_compact_output(
self, mocker, message_fixture, to_vary_in_each_message
):
message_fixture.update({"id": 4})
varied_message = dict(message_fixture, **to_vary_in_each_message)
msg_box = MessageBox(varied_message, self.model, varied_message)
view_components = msg_box.main_view()
assert len(view_components) == 1
assert isinstance(view_components[0], Padding)
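    # Messages whose ids are recorded in index["edited_messages"] get an
    # "EDITED" label column in their content header.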
def test_main_view_generates_EDITED_label(
self, mocker, messages_successful_response
):
messages = messages_successful_response["messages"]
for message in messages:
self.model.index["edited_messages"].add(message["id"])
msg_box = MessageBox(message, self.model, message)
view_components = msg_box.main_view()
label = view_components[0].original_widget.contents[0]
assert label[0].text == "EDITED"
assert label[1][1] == 7
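    # The author line needs updating only when the neighbouring message was sent
    # by a different author.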
@pytest.mark.parametrize(
"to_vary_in_last_message, update_required",
[
({"sender_full_name": "Unique name (won't be in next message)"}, True),
({}, False),
],
ids=[
"author_field_present",
"author_field_not_present",
],
)
def test_update_message_author_status(
self,
message_fixture,
update_required,
to_vary_in_last_message,
):
message = message_fixture
last_msg = dict(message, **to_vary_in_last_message)
msg_box = MessageBox(message, self.model, last_msg)
assert msg_box.update_message_author_status() == update_required
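    # Composing from a stream or topic narrow prefills the compose box with that
    # stream; any other narrow opens a blank stream box.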
@pytest.mark.parametrize("key", keys_for_command("STREAM_MESSAGE"))
@pytest.mark.parametrize(
"narrow, expect_to_prefill",
[
([], False),
([["stream", "general"]], True),
([["stream", "general"], ["topic", "Test"]], True),
([["is", "starred"]], False),
([["is", "mentioned"]], False),
([["is", "private"]], False),
([["pm_with", "[email protected]"]], False),
],
        ids=[
            "all_messages_narrow",
            "stream_narrow",
            "topic_narrow",
            "starred_messages_narrow",
            "mentions_narrow",
            "private_messages_narrow",
            "private_conversation_narrow",
        ],
)
def test_keypress_STREAM_MESSAGE(
self, mocker, msg_box, widget_size, narrow, expect_to_prefill, key
):
write_box = msg_box.model.controller.view.write_box
msg_box.model.narrow = narrow
size = widget_size(msg_box)
msg_box.keypress(size, key)
if expect_to_prefill:
write_box.stream_box_view.assert_called_once_with(
caption="PTEST",
stream_id=205,
)
else:
write_box.stream_box_view.assert_called_once_with(0)
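    # Whether a message can be edited depends on the sender, realm settings, the
    # edit time limit, and "(no topic)" special-casing; expectations are given
    # separately for stream and private messages.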
@pytest.mark.parametrize("key", keys_for_command("EDIT_MESSAGE"))
@pytest.mark.parametrize(
[
"to_vary_in_each_message",
"realm_editing_allowed",
"msg_body_edit_limit",
"expect_msg_body_edit_enabled",
"expect_editing_to_succeed",
"expect_footer_text",
],
[
case(
{"sender_id": 2, "timestamp": 45, "subject": "test"},
True,
60,
{"stream": False, "private": False},
{"stream": False, "private": False},
{
"stream": " You can't edit messages sent by other users that already have a topic.",
"private": " You can't edit private messages sent by other users.",
},
id="msg_sent_by_other_user_with_topic",
),
case(
{"sender_id": 1, "timestamp": 1, "subject": "test"},
True,
60,
{"stream": False, "private": False},
{"stream": True, "private": False},
{
"stream": " Only topic editing allowed."
" Time Limit for editing the message body has been exceeded.",
"private": " Time Limit for editing the message has been exceeded.",
},
id="topic_edit_only_after_time_limit",
),
case(
{"sender_id": 1, "timestamp": 45, "subject": "test"},
False,
60,
{"stream": False, "private": False},
{"stream": False, "private": False},
{
"stream": " Editing sent message is disabled.",
"private": " Editing sent message is disabled.",
},
id="realm_editing_not_allowed",
),
case(
{"sender_id": 1, "timestamp": 45, "subject": "test"},
True,
60,
{"stream": True, "private": True},
{"stream": True, "private": True},
{"stream": None, "private": None},
id="realm_editing_allowed_and_within_time_limit",
),
case(
{"sender_id": 1, "timestamp": 1, "subject": "test"},
True,
0,
{"stream": True, "private": True},
{"stream": True, "private": True},
{"stream": None, "private": None},
id="no_msg_body_edit_limit",
),
case(
{"sender_id": 1, "timestamp": 1, "subject": "(no topic)"},
True,
60,
{"stream": False, "private": False},
{"stream": True, "private": False},
{
"stream": " Only topic editing allowed."
" Time Limit for editing the message body has been exceeded.",
"private": " Time Limit for editing the message has been exceeded.",
},
id="msg_sent_by_me_with_no_topic",
),
case(
{"sender_id": 2, "timestamp": 1, "subject": "(no topic)"},
True,
60,
{"stream": False, "private": False},
{"stream": True, "private": False},
{
"stream": " Only topic editing is allowed."
" This is someone else's message but with (no topic).",
"private": " You can't edit private messages sent by other users.",
},
id="msg_sent_by_other_with_no_topic",
),
case(
{"sender_id": 1, "timestamp": 1, "subject": "(no topic)"},
False,
60,
{"stream": False, "private": False},
{"stream": False, "private": False},
{
"stream": " Editing sent message is disabled.",
"private": " Editing sent message is disabled.",
},
id="realm_editing_not_allowed_for_no_topic",
),
case(
{"sender_id": 1, "timestamp": 45, "subject": "(no topic)"},
True,
0,
{"stream": True, "private": True},
{"stream": True, "private": True},
{"stream": None, "private": None},
id="no_msg_body_edit_limit_with_no_topic",
),
],
)
def test_keypress_EDIT_MESSAGE(
self,
mocker,
message_fixture,
widget_size,
to_vary_in_each_message,
realm_editing_allowed,
msg_body_edit_limit,
expect_msg_body_edit_enabled,
expect_editing_to_succeed,
expect_footer_text,
key,
):
if message_fixture["type"] == "private":
to_vary_in_each_message["subject"] = ""
varied_message = dict(message_fixture, **to_vary_in_each_message)
message_type = varied_message["type"]
msg_box = MessageBox(varied_message, self.model, message_fixture)
size = widget_size(msg_box)
msg_box.model.user_id = 1
msg_box.model.initial_data = {
"realm_allow_message_editing": realm_editing_allowed,
"realm_message_content_edit_limit_seconds": msg_body_edit_limit,
}
msg_box.model.fetch_raw_message_content.return_value = "Edit this message"
write_box = msg_box.model.controller.view.write_box
write_box.msg_edit_state = None
write_box.msg_body_edit_enabled = None
report_error = msg_box.model.controller.report_error
report_warning = msg_box.model.controller.report_warning
mocker.patch(BOXES + ".time", return_value=100)
msg_box.keypress(size, key)
if expect_editing_to_succeed[message_type]:
assert write_box.msg_edit_state.message_id == varied_message["id"]
assert write_box.msg_edit_state.old_topic == varied_message["subject"]
write_box.msg_write_box.set_edit_text.assert_called_once_with(
"Edit this message"
)
assert (
write_box.msg_body_edit_enabled
== expect_msg_body_edit_enabled[message_type]
)
else:
assert write_box.msg_edit_state is None
write_box.msg_write_box.set_edit_text.assert_not_called()
if expect_footer_text[message_type]:
if expect_editing_to_succeed[message_type]:
report_warning.assert_called_once_with(expect_footer_text[message_type])
else:
report_error.assert_called_once_with(expect_footer_text[message_type])
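    # transform_content flattens rendered blockquote HTML into plain lines, with
    # one quote marker per nesting level ({} placeholders below).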
@pytest.mark.parametrize(
"raw_html, expected_content",
[
# Avoid reformatting to preserve quote result readability
# fmt: off
case("""<blockquote>
<p>A</p>
</blockquote>
<p>B</p>""",
("{} A\n\n"
"B"),
id="quoted level 1"),
case("""<blockquote>
<blockquote>
<p>A</p>
</blockquote>
<p>B</p>
</blockquote>
<p>C</p>""",
("{} {} A\n\n"
"{} B\n\n"
"C"),
id="quoted level 2"),
case("""<blockquote>
<blockquote>
<blockquote>
<p>A</p>
</blockquote>
<p>B</p>
</blockquote>
<p>C</p>
</blockquote>
<p>D</p>""",
("{} {} {} A\n\n"
"{} {} B\n\n"
"{} C\n\n"
"D"),
id="quoted level 3"),
case("""<blockquote>
<p>A<br>
B</p>
</blockquote>
<p>C</p>""",
("{} A\n"
"{} B\n\n"
"C"),
id="multi-line quoting"),
case("""<blockquote>
<p><a href='https://chat.zulip.org/'>czo</a></p>
</blockquote>""",
("{} czo [1]\n"),
id="quoting with links"),
case("""<blockquote>
<blockquote>
<p>A<br>
B</p>
</blockquote>
</blockquote>""",
("{} {} A\n"
"{} {} B\n\n"),
id="multi-line level 2"),
case("""<blockquote>
<blockquote>
<p>A</p>
</blockquote>
<p>B</p>
<blockquote>
<p>C</p>
</blockquote>
</blockquote>""",
("{} {} A\n"
"{} B\n"
"{} \n"
"{} {} C\n\n"),
id="quoted level 2-1-2"),
case("""<p><a href='https://chat.zulip.org/1'>czo</a></p>
<blockquote>
<p><a href='https://chat.zulip.org/2'>czo</a></p>
<blockquote>
<p>A<br>
B</p>
</blockquote>
<p>C</p>
</blockquote>
<p>D</p>""",
("czo [1]\n"
"{} czo [2]\n"
"{} \n"
"{} {} A\n"
"{} {} B\n\n"
"{} C\n\n"
"D"),
id="quoted with links level 2"),
case("""<blockquote>
<blockquote>
<blockquote>
<p>A</p>
</blockquote>
<p>B</p>
<blockquote>
<p>C</p>
</blockquote>
<p>D</p>
</blockquote>
<p>E</p>
</blockquote>
<p>F</p>""",
("{} {} {} A\n"
"{} {} B\n"
"{} {} \n"
"{} {} {} C\n\n"
"{} {} D\n\n"
"{} E\n\n"
"F"),
id="quoted level 3-2-3"),
case("""<blockquote>
<p>A</p>
<blockquote>
<blockquote>
<blockquote>
<p>B<br>
C</p>
</blockquote>
</blockquote>
</blockquote>
</blockquote>""",
("{} A\n"
"{} {} {} B\n"
"{} {} {} C\n"),
id="quoted level 1-3",
marks=pytest.mark.xfail(reason="rendered_bug")),
case("""<blockquote>
<p><a href="https://chat.zulip.org/1">czo</a></p>
<blockquote>
<p><a href="https://chat.zulip.org/2">czo</a></p>
<blockquote>
<p>A<br>
B</p>
</blockquote>
<p>C</p>
</blockquote>
<p>D<br>
E</p>
</blockquote>""",
("{} czo [1]\n"
"{} {} czo [2]\n"
"{} {} {} A\n"
"{} {} {} B\n"
"{} {} C\n"
"{} D\n"
"{} E\n"),
id="quoted with links level 1-3-1",
marks=pytest.mark.xfail(reason="rendered_bug")),
# fmt: on
],
)
def test_transform_content(self, mocker, raw_html, expected_content):
expected_content = expected_content.replace("{}", QUOTED_TEXT_MARKER)
content, *_ = MessageBox.transform_content(raw_html, SERVER_URL)
rendered_text = Text(content)
assert rendered_text.text == expected_content
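    # reactions_view aggregates reactions by emoji with counts and styles the
    # current user's own reactions differently.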
# FIXME This is the same parametrize as MsgInfoView:test_height_reactions
@pytest.mark.parametrize(
"to_vary_in_each_message",
[
{
"reactions": [
{
"emoji_name": "thumbs_up",
"emoji_code": "1f44d",
"user": {
"email": "[email protected]",
"full_name": "Iago",
"id": 5,
},
"reaction_type": "unicode_emoji",
},
{
"emoji_name": "zulip",
"emoji_code": "zulip",
"user": {
"email": "[email protected]",
"full_name": "Iago",
"id": 5,
},
"reaction_type": "zulip_extra_emoji",
},
{
"emoji_name": "zulip",
"emoji_code": "zulip",
"user": {
"email": "[email protected]",
"full_name": "aaron",
"id": 1,
},
"reaction_type": "zulip_extra_emoji",
},
{
"emoji_name": "heart",
"emoji_code": "2764",
"user": {
"email": "[email protected]",
"full_name": "Iago",
"id": 5,
},
"reaction_type": "unicode_emoji",
},
]
}
],
)
def test_reactions_view(self, message_fixture, to_vary_in_each_message):
self.model.user_id = 1
varied_message = dict(message_fixture, **to_vary_in_each_message)
msg_box = MessageBox(varied_message, self.model, None)
reactions = to_vary_in_each_message["reactions"]
reactions_view = msg_box.reactions_view(reactions)
assert reactions_view.original_widget.text == (
":heart: 1 :thumbs_up: 1 :zulip: 2 "
)
assert reactions_view.original_widget.attrib == [
("reaction", 9),
(None, 1),
("reaction", 13),
(None, 1),
("reaction_mine", 9),
]
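    # footlinks_view lists one "<index>: <url>" line per link that needs a
    # footnote; links whose text already shows the URL produce no footlinks.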
@pytest.mark.parametrize(
"message_links, expected_text, expected_attrib, expected_footlinks_width",
[
case(
OrderedDict(
[
(
"https://github.com/zulip/zulip-terminal/pull/1",
("#T1", 1, True),
),
]
),
"1: https://github.com/zulip/zulip-terminal/pull/1",
[("msg_link_index", 2), (None, 1), ("msg_link", 46)],
49,
id="one_footlink",
),
case(
OrderedDict(
[
("https://foo.com", ("Foo!", 1, True)),
("https://bar.com", ("Bar!", 2, True)),
]
),
"1: https://foo.com\n2: https://bar.com",
[
("msg_link_index", 2),
(None, 1),
("msg_link", 15),
(None, 1),
("msg_link_index", 2),
(None, 1),
("msg_link", 15),
],
18,
id="more_than_one_footlink",
),
case(
OrderedDict(
[
("https://example.com", ("https://example.com", 1, False)),
("http://example.com", ("http://example.com", 2, False)),
]
),
None,
None,
0,
id="similar_link_and_text",
),
case(
OrderedDict(
[
("https://foo.com", ("https://foo.com, Text", 1, True)),
("https://bar.com", ("Text, https://bar.com", 2, True)),
]
),
"1: https://foo.com\n2: https://bar.com",
[
("msg_link_index", 2),
(None, 1),
("msg_link", 15),
(None, 1),
("msg_link_index", 2),
(None, 1),
("msg_link", 15),
],
18,
id="different_link_and_text",
),
case(
OrderedDict(
[
("https://foo.com", ("Foo!", 1, True)),
("http://example.com", ("example.com", 2, False)),
("https://bar.com", ("Bar!", 3, True)),
]
),
"1: https://foo.com\n3: https://bar.com",
[
("msg_link_index", 2),
(None, 1),
("msg_link", 15),
(None, 1),
("msg_link_index", 2),
(None, 1),
("msg_link", 15),
],
18,
id="http_default_scheme",
),
],
)
def test_footlinks_view(
self, message_links, expected_text, expected_attrib, expected_footlinks_width
):
footlinks, footlinks_width = MessageBox.footlinks_view(
message_links,
maximum_footlinks=3,
padded=True,
wrap="ellipsis",
)
if expected_text:
assert footlinks.original_widget.text == expected_text
assert footlinks.original_widget.attrib == expected_attrib
assert footlinks_width == expected_footlinks_width
else:
assert footlinks is None
assert not hasattr(footlinks, "original_widget")
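    # maximum_footlinks=0 disables the footlinks footer entirely; otherwise a
    # Padding widget is returned.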
@pytest.mark.parametrize(
"maximum_footlinks, expected_instance",
[
(0, type(None)),
(1, Padding),
(3, Padding),
],
)
def test_footlinks_limit(self, maximum_footlinks, expected_instance):
message_links = OrderedDict(
[
("https://github.com/zulip/zulip-terminal", ("ZT", 1, True)),
]
)
footlinks, _ = MessageBox.footlinks_view(
message_links,
maximum_footlinks=maximum_footlinks,
padded=True,
wrap="ellipsis",
)
assert isinstance(footlinks, expected_instance)
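    # A left mouse click is translated into an ENTER keypress unless the compose
    # box is currently open in editor mode.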
@pytest.mark.parametrize(
"key", keys_for_command("ENTER"), ids=lambda param: f"left_click-key:{param}"
)
def test_mouse_event_left_click(
self, mocker, msg_box, key, widget_size, compose_box_is_open
):
size = widget_size(msg_box)
col = 1
row = 1
focus = mocker.Mock()
mocker.patch(BOXES + ".keys_for_command", return_value=[key])
mocker.patch.object(msg_box, "keypress")
msg_box.model = mocker.Mock()
msg_box.model.controller.is_in_editor_mode.return_value = compose_box_is_open
msg_box.mouse_event(size, "mouse press", 1, col, row, focus)
if compose_box_is_open:
msg_box.keypress.assert_not_called()
else:
msg_box.keypress.assert_called_once_with(size, key)
|
the-stack_0_18039 | from sympy import *
x,y,z = symbols('x y z')
all_symbols = [x,y]
ro_x=5
ro_y=5
psi=exp(-(((x-1)*(x-1))/(2*ro_x*ro_x) + ((y-2)*(y-2))/(2*ro_y*ro_y)))
target_value = 0
model_function = z - psi
obs_eq = Matrix([target_value - model_function]).vec()
obs_eq_jacobian = obs_eq.jacobian(all_symbols)
print(psi)
print(obs_eq)
print(obs_eq_jacobian)
print(latex(psi))
print(latex(obs_eq_jacobian))
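# Emit a small C/C++ header with the Gaussian, the observation equation, and its
# analytic Jacobian as inline functions (ccode() converts the sympy expressions).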
with open("example_func_xy_jacobian.h",'w') as f_cpp:
f_cpp.write("inline void example_func_xy(double &psi, double x, double y)\n")
f_cpp.write("{")
f_cpp.write("psi = %s;\n"%(ccode(psi)))
f_cpp.write("}")
f_cpp.write("\n")
f_cpp.write("inline void observation_equation_example_func_xy(double &delta, double x, double y, double z)\n")
f_cpp.write("{")
f_cpp.write("delta = %s;\n"%(ccode(obs_eq[0,0])))
f_cpp.write("}")
f_cpp.write("\n")
f_cpp.write("inline void observation_equation_example_func_xy_jacobian(Eigen::Matrix<double, 1, 2> &j, double x, double y)\n")
f_cpp.write("{")
for i in range (1):
for j in range (2):
f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(obs_eq_jacobian[i,j])))
f_cpp.write("}")
|