content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang
---|---|---|---|---|---|---|---|---|
import subprocess
if __name__ == "__main__":
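    # Sweep the fiber x-position from -5 to 5, launching fiber.py on 6 MPI processes for each position and redirecting output to log.log.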
for fiber_xposition in range(-5, 6):
command = f"mpirun -np {6} python fiber.py --fiber_xposition={fiber_xposition} > log.log"
print(command)
subprocess.call(command, shell=True)
| 31.625 | 97 | 0.667984 | [
"MIT"
] | joamatab/grating_coupler_meep | grating_coupler_meep/fiber_sweep_fiber_xposition.py | 253 | Python |
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
import templar
version = templar.__version__
dependencies = [
'jinja2==2.8',
]
setup(
name='templar',
version=version,
description='A static templating engine written in Python',
url='https://github.com/albert12132/templar',
author='Albert Wu',
author_email='[email protected]',
license='MIT',
keywords=['templating', 'static template', 'markdown'],
packages=find_packages(exclude=['tests*']),
install_requires=dependencies,
entry_points={
'console_scripts': [
'templar=templar.cli.templar:main',
'markdown=templar.markdown:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Text Processing :: Markup :: HTML',
],
)
| 28.275 | 63 | 0.633068 | [
"MIT"
] | Cal-CS-61A-Staff/templar | setup.py | 1,131 | Python |
# coding=utf-8
from src.tk import TK
import argparse
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda x: x.lower() in ('yes', 'true', 't', '1'))
parser.add_argument('--mode', default='main', help='')
args = parser.parse_args()
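# Dispatch on --mode: 'main' starts the TK window; every other mode imports the matching solver module and runs KO().solve().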
if args.mode == 'main':
window = TK()
window.start()
elif args.mode == 'N_0_2':
from src.N_0_2 import KO
ko = KO()
ko.solve()
elif args.mode == 'test':
from src.test import KO
ko = KO()
ko.solve()
elif args.mode == 'E_2_4':
from src.E_2_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_3_4':
from src.E_3_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_4_4':
from src.E_4_4 import KO
ko = KO()
ko.solve()
elif args.mode == 'paper':
from src.paper import KO
ko = KO()
ko.solve()
elif args.mode == 'N_5_6':
from src.N_5_6 import KO
ko = KO()
ko.solve()
elif args.mode == 'E_10_4':
from src.E_10_4 import KO
ko = KO()
ko.solve()
else:
pass
| 21.333333 | 91 | 0.586914 | [
"Apache-2.0"
] | a892574222/game | main.py | 1,024 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetListenerResult',
'AwaitableGetListenerResult',
'get_listener',
'get_listener_output',
]
warnings.warn("""aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""", DeprecationWarning)
@pulumi.output_type
class GetListenerResult:
"""
A collection of values returned by getListener.
"""
def __init__(__self__, alpn_policy=None, arn=None, certificate_arn=None, default_actions=None, id=None, load_balancer_arn=None, port=None, protocol=None, ssl_policy=None, tags=None):
if alpn_policy and not isinstance(alpn_policy, str):
raise TypeError("Expected argument 'alpn_policy' to be a str")
pulumi.set(__self__, "alpn_policy", alpn_policy)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if certificate_arn and not isinstance(certificate_arn, str):
raise TypeError("Expected argument 'certificate_arn' to be a str")
pulumi.set(__self__, "certificate_arn", certificate_arn)
if default_actions and not isinstance(default_actions, list):
raise TypeError("Expected argument 'default_actions' to be a list")
pulumi.set(__self__, "default_actions", default_actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancer_arn and not isinstance(load_balancer_arn, str):
raise TypeError("Expected argument 'load_balancer_arn' to be a str")
pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
if port and not isinstance(port, int):
raise TypeError("Expected argument 'port' to be a int")
pulumi.set(__self__, "port", port)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if ssl_policy and not isinstance(ssl_policy, str):
raise TypeError("Expected argument 'ssl_policy' to be a str")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> str:
return pulumi.get(self, "alpn_policy")
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> str:
return pulumi.get(self, "certificate_arn")
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> Sequence['outputs.GetListenerDefaultActionResult']:
return pulumi.get(self, "default_actions")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> str:
return pulumi.get(self, "load_balancer_arn")
@property
@pulumi.getter
def port(self) -> int:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> str:
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
class AwaitableGetListenerResult(GetListenerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetListenerResult(
alpn_policy=self.alpn_policy,
arn=self.arn,
certificate_arn=self.certificate_arn,
default_actions=self.default_actions,
id=self.id,
load_balancer_arn=self.load_balancer_arn,
port=self.port,
protocol=self.protocol,
ssl_policy=self.ssl_policy,
tags=self.tags)
def get_listener(arn: Optional[str] = None,
load_balancer_arn: Optional[str] = None,
port: Optional[int] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetListenerResult:
"""
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
:param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
"""
pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
__args__ = dict()
__args__['arn'] = arn
__args__['loadBalancerArn'] = load_balancer_arn
__args__['port'] = port
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:elasticloadbalancingv2/getListener:getListener', __args__, opts=opts, typ=GetListenerResult).value
return AwaitableGetListenerResult(
alpn_policy=__ret__.alpn_policy,
arn=__ret__.arn,
certificate_arn=__ret__.certificate_arn,
default_actions=__ret__.default_actions,
id=__ret__.id,
load_balancer_arn=__ret__.load_balancer_arn,
port=__ret__.port,
protocol=__ret__.protocol,
ssl_policy=__ret__.ssl_policy,
tags=__ret__.tags)
@_utilities.lift_output_func(get_listener)
def get_listener_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
load_balancer_arn: Optional[pulumi.Input[Optional[str]]] = None,
port: Optional[pulumi.Input[Optional[int]]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetListenerResult]:
"""
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
:param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` is not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
"""
pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
...
| 39.086364 | 198 | 0.671822 | [
"ECL-2.0",
"Apache-2.0"
] | RafalSumislawski/pulumi-aws | sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py | 8,599 | Python |
# modmerger framework
# by sphere
modmerger_version = 201
# Note: the following is from Warband 1.127 module system.
from modmerger_options import *
# list of current module components
# not in use atm
mod_components = [
"animations",
"constants",
"dialogs",
"factions",
"game_menus",
"info",
"info_pages",
"items",
"map_icons",
"meshes",
"mission_templates",
"music",
"particle_systems",
"parties",
"party_templates",
"postfx",
"presentations",
"quests",
"scenes",
"scene_props",
"scripts",
"simple_triggers",
"skills",
"skins",
"sounds",
"strings",
"tableau_materials",
"triggers",
"troops",
"variables",
]
# these are components that do not need to be branded
mod_components0=[
"info",
]
# These are the components requiring full import of symbols. Currently only "constants"
mod_components1=[
"constants",
]
# these are components which passes in variable with same name as the component name itself
mod_components2=[
"animations",
"dialogs",
"game_menus",
"info_pages",
"items",
"map_icons",
"meshes",
"particle_systems",
"parties",
"party_templates",
"presentations",
"quests",
"scenes",
"scene_props",
"scripts",
"simple_triggers",
"skills",
"skins",
"sounds",
"strings",
"triggers",
"troops",
]
# This is a list of components with a list of the important global variables defined in it)
mod_components3={
#"info": ["export_dir"], # export_dir
"variables" : ["reserved_variables"] , # reserved_variables
"music": ["tracks"], # tracks
"tableau_materials" : ["tableaus"] , # tableaus
"postfx" : ["postfx_params"], # postfx_params
"factions" :["factions","default_kingdom_relations"],
"mission_templates": [
"mission_templates",
"multiplayer_server_check_belfry_movement",
"multiplayer_server_spawn_bots",
"multiplayer_server_manage_bots",
"multiplayer_server_check_polls",
"multiplayer_server_check_end_map",
"multiplayer_once_at_the_first_frame",
"multiplayer_battle_window_opened",
"common_battle_mission_start",
"common_battle_tab_press",
"common_battle_init_banner",
"common_arena_fight_tab_press",
"common_custom_battle_tab_press",
"custom_battle_check_victory_condition",
"custom_battle_check_defeat_condition",
"common_battle_victory_display",
"common_siege_question_answered",
"common_custom_battle_question_answered",
"common_custom_siege_init",
"common_siege_init",
"common_music_situation_update",
"common_siege_ai_trigger_init",
"common_siege_ai_trigger_init_2",
"common_siege_ai_trigger_init_after_2_secs",
"common_siege_defender_reinforcement_check",
"common_siege_defender_reinforcement_archer_reposition",
"common_siege_attacker_reinforcement_check",
"common_siege_attacker_do_not_stall",
"common_battle_check_friendly_kills",
"common_battle_check_victory_condition",
"common_battle_victory_display",
"common_siege_refill_ammo",
"common_siege_check_defeat_condition",
"common_battle_order_panel",
"common_battle_order_panel_tick",
"common_battle_inventory",
"common_inventory_not_available",
"common_siege_init_ai_and_belfry",
"common_siege_move_belfry",
"common_siege_rotate_belfry",
"common_siege_assign_men_to_belfry",
"tournament_triggers",
],
}
# fix for mb vanilla
if module_sys_info["version"] <= 1011:
mod_components.remove("info_pages")
mod_components.remove("postfx")
mod_components3["mission_templates"] = [ #1011 version
"mission_templates",
"common_battle_mission_start",
"common_battle_tab_press",
"common_arena_fight_tab_press",
"common_custom_battle_tab_press",
"common_battle_victory_display",
"common_siege_question_answered",
"common_custom_battle_question_answered",
"common_custom_siege_init",
"common_siege_init",
"common_music_situation_update",
"common_siege_ai_trigger_init",
"common_siege_ai_trigger_init_2",
"common_siege_ai_trigger_init_after_2_secs",
"common_siege_defender_reinforcement_check",
"common_siege_defender_reinforcement_archer_reposition",
"common_siege_attacker_reinforcement_check",
"common_siege_attacker_do_not_stall",
"common_battle_check_friendly_kills",
"common_battle_check_victory_condition",
"common_battle_victory_display",
"common_siege_refill_ammo",
"common_siege_check_defeat_condition",
"common_battle_order_panel",
"common_battle_order_panel_tick",
"common_battle_inventory",
"common_inventory_not_available",
"common_siege_init_ai_and_belfry",
"common_siege_move_belfry",
"common_siege_rotate_belfry",
"common_siege_assign_men_to_belfry",
]
# gets the type of component on whether it is found in mod_components1 or mod_components2. Those not found in either are returned as 0
def get_component_type(component_name):
comp_type = 0
try:
mod_components1.index(component_name)
comp_type |= 1
except ValueError:
pass
try:
mod_components2.index(component_name)
comp_type |= 2
except ValueError:
pass
try:
mod_components3[component_name]
comp_type |= 4
except KeyError:
pass
    return comp_type
| 29.577889 | 136 | 0.663099 | [
"MIT"
] | ChroniclesStudio/money-and-honour | src/modmerger_header.py | 5,886 | Python |
#!/usr/bin/env python3
import json
import argparse
import datetime
class TicketManager:
ticketfile = '/Users/ben/ticketing/tickets.json'
    def __init__(self, ticketfile: str = '/Users/ben/Google Drive/code/ticketing/tickets.json') -> None:
        self.ticketfile = ticketfile
self.read_tickets()
def read_tickets(self)-> None:
self.tickets = json.load(open(self.ticketfile))
def write_tickets(self)-> None:
json.dump(self.tickets, open(self.ticketfile, "w"), indent=4)
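    # Build a ticket record keyed by its title and write it to the JSON file immediately.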
def create_ticket(self, title="", desc="", dest="", due="", pri=0, completed=False):
ticket = {"title": title,
"desc": desc,
"for": dest,
"time_in": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
"time_out": due,
"nice": pri,
"completed": completed
}
self.tickets[title] = ticket
self.write_tickets()
self.read_tickets()
def update_ticket(self, title, new_completed):
self.tickets[title]["completed"] = new_completed
self.write_tickets()
self.read_tickets()
def show_all_tickets(self):
for ticket in self.tickets.values():
print("""TICKET NAME: {}
\tTICKET DESCRIPTION: {}
\tTICKET CREATED: {}
\tTICKET DUE: {}
\tTICKET FOR: {}
\tTICKET DONE: {}
\tTICKET PRIORITY: {}
""".format(ticket['title'], ticket['desc'], ticket['time_in'], ticket['time_out'],
ticket['for'], ticket['completed'], ticket['nice']))
    def show_unfinished(self):
flag = False
for ticket in self.tickets.values():
if not ticket['completed']:
flag = True
print("""TICKET NAME: {}
\tTICKET DESCRIPTION: {}
\tTICKET CREATED: {}
\tTICKET DUE: {}
\tTICKET FOR: {}
\tTICKET PRIORITY: {}
""".format(ticket['title'], ticket['desc'], ticket['time_in'], ticket['time_out'],
ticket['for'], ticket['nice']))
if not flag:
print("No Unfinished Tasks!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Manage work tickets.')
parser.add_argument("--mode", action="store", dest="mode", default='ls')
parser.add_argument("--title", action="store", dest="title")
parser.add_argument("--desc", action="store", dest="desc")
parser.add_argument("--for", action="store", dest="dest")
parser.add_argument("--due", action="store", dest="time_out")
parser.add_argument("--pri", action="store", dest="nice")
parser.add_argument("--done", action="store_true",
dest="completed", default=False)
args = parser.parse_args()
tm = TicketManager("tickets.json")
if args.mode == "ls":
        tm.show_unfinished()
elif args.mode == "ls2":
tm.show_all_tickets()
elif args.mode == "new" or args.mode == "add":
tm.create_ticket(title=args.title, desc=args.desc, dest=args.dest,
due=args.time_out, pri=args.nice, completed=args.completed)
print("New Task '{}' Added".format(args.title))
elif args.mode == "up":
tm.update_ticket(args.title, args.completed)
| 35.23913 | 110 | 0.588834 | [
"MIT"
] | benhg/work-tickets | worktickets.py | 3,242 | Python |
import pandas as pd
import datetime as dt
import smtplib as st
import random as rd
FROM = "[email protected]"
PASSWORD = "1234abc()"
SUBJECT = "Happy birthday!"
LETTERS = [1, 2, 3]
PLACEHOLDER = "[NAME]"
PATH = "birthdays.csv"
C_NAME = "name"
C_EMAIL = "email"
C_YEAR = "year"
C_MONTH = "month"
C_DAY = "day"
data = pd.read_csv(PATH)
current = dt.datetime.now()
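# For each row whose birthday month and day match today, fill a randomly chosen letter template with the person's name and send it via Gmail SMTP.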
for row in data.iterrows():
row = row[1]
birthday = dt.datetime(int(row[C_YEAR]), int(row[C_MONTH]), int(row[C_DAY]))
if current.month == birthday.month and current.day == birthday.day:
number = rd.choice(LETTERS)
with open(f"letter_templates/letter_{number}.txt") as handle:
letter = handle.read()
letter = letter.replace(PLACEHOLDER, row[C_NAME])
with st.SMTP("smtp.gmail.com") as connection:
message = f"Subject:{SUBJECT}\n\n{letter}"
connection.starttls()
connection.login(user=FROM, password=PASSWORD)
connection.sendmail(
from_addr=FROM,
to_addrs=row[C_EMAIL],
msg=message
)
| 25.636364 | 80 | 0.617908 | [
"MIT"
] | YosafatM/100-days-of-Python | Intermedio Avanzado/32 Felicitaciones/main.py | 1,128 | Python |
from datetime import datetime
import unittest
from unittest.mock import MagicMock
import numpy as np
from pyhsi.cameras import BaslerCamera
class MockGrab:
def __init__(self, data):
self.Array = data
def GrabSucceeded(self):
return True
def Release(self):
pass
class TestBaslerCamera(unittest.TestCase):
def setUp(self):
self.mock_device = MagicMock()
self.mock_stage = MagicMock()
self.mock_stage.default_velocity = 20
self.cam = BaslerCamera(device=self.mock_device)
def test_capture(self):
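        # Two frames are grabbed while the stage reports it is moving; capture() is expected to rotate each frame 90 degrees and stack them along the first axis.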
self.mock_device.RetrieveResult = MagicMock(side_effect=[
MockGrab([[0, 12], [3, 100]]),
MockGrab([[9, 8], [31, 5]])
])
self.mock_stage.is_moving = MagicMock(side_effect=[True, True, False])
data = self.cam.capture(self.mock_stage, [0, 100])
target = np.array([[[12, 100], [0, 3]], [[8, 5], [9, 31]]])
np.testing.assert_array_equal(data, target)
def test_file_name_basic(self):
fn = "test_sample"
out = self.cam._process_file_name(fn, datetime(2020, 6, 20),
0, 100, 10, (227, 300, 400))
self.assertEqual(out, "test_sample.hdr")
def test_file_name_fields(self):
fn = "sample_{date}_{time}_exp={exp}_{frames}_frames"
out = self.cam._process_file_name(fn, datetime(2020, 6, 20, 13, 40),
0, 100, 10, (227, 300, 400))
target = "sample_2020-06-20_13:40:00_exp=4000_227_frames.hdr"
self.assertEqual(out, target)
if __name__ == "__main__":
unittest.main()
| 30.62963 | 78 | 0.603386 | [
"MIT"
] | rddunphy/pyHSI | test/test_cameras.py | 1,654 | Python |
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
class TripleSoftmaxLoss(nn.Module):
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
vocab,
document_coef: float = 0.4,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False):
super(TripleSoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.hidden = 1000
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
self.document_coef = document_coef
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 2
logging.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.relu = nn.ReLU()
self.document2hidden = nn.Linear(291868, self.hidden)
self.hidden2output = nn.Linear(self.hidden, 768)
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, document_rep: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
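        # Project the raw document representation (291868 dims) through a 1000-unit hidden layer down to the 768-dim sentence-embedding space.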
document_rep = self.relu(self.hidden2output(self.relu(self.document2hidden(document_rep.float()))))
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
vectors_concat.append(torch.abs(rep_a - document_rep))
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
loss = (1.0 - self.document_coef) * loss_fct(output, labels.view(-1))
            loss -= self.document_coef * torch.sum(torch.cosine_similarity(document_rep, rep_b))  # TODO: it would be nice to bring MMI in here as well.
return loss
else:
            return reps, output
| 43.258065 | 121 | 0.670022 | [
"Apache-2.0"
] | jaimeenahn/COVID-sentence-bert | sentence_transformers/losses/TripleSoftmaxLoss.py | 2,702 | Python |
import string
from flask import Blueprint
from flask import abort
from flask import redirect
from flask import render_template
from meerkat import utils
from meerkat.db import DataAccess
page = Blueprint('simple', __name__)
@page.route('/simple/')
def simple_index():
links = DataAccess.get_libs()
    links = sorted(links, key=str.lower)
return render_template('simple.html', links=links)
@page.route('/simple/<prefix>/')
def simple(prefix=''):
normalized, prefix = utils.normalize_pkg_name(prefix)
if normalized:
return redirect('/simple/{0}/'.format(prefix))
if not DataAccess.has_lib(prefix):
abort(404)
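    # Build one download link per package file under this prefix, attaching the stored md5 digest as a URL fragment.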
links = []
for package in DataAccess.get_packages_by_lib(prefix):
info = DataAccess.get_package(package)
href = '/packages/{0}#md5={1}'.format(package, info.get('md5'))
links.append(dict(file=package, href=href))
return render_template('simple_detail.html', links=links, prefix=prefix)
| 26.763158 | 77 | 0.679449 | [
"MIT"
] | by46/meerkat | meerkat/views/simple.py | 1,017 | Python |
re.VERBOSE
| 10 | 10 | 0.9 | [
"BSD-3-Clause"
] | kuanpern/jupyterlab-snippets-multimenus | example_snippets/multimenus_snippets/NewSnippets/Python/Regular expressions/Compilation flags/Enable verbose REs, for cleaner and more organized code.py | 10 | Python |
import socket
import random
import os
import requests
import re
import github
import minecraft
import string
import sys
HOST = "irc.libera.chat"
PORT = 6667
NICK = "DoveBot"
#PASSWORD = os.getenv("PASSWORD")
CHANNEL = "#dovegaming"
SERVER = ""
readbuffer = ""
def send(message):
s.send(message)
print(message)
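# Connect to Libera.Chat, register with NICK/USER, and join the channel before entering the read loop.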
s = socket.socket()
s.connect((HOST, PORT))
send(bytes("NICK %s\r\n" % NICK, "UTF-8"))
send(bytes("USER %s %s %s :%s\r\n" % (NICK, NICK, NICK, NICK), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ regain {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ identify {} {}\r\n".format(NICK, PASSWORD), "UTF-8"))
send(bytes("JOIN {}\r\n".format(CHANNEL), "UTF-8"))
#s.send(bytes("PRIVMSG NickServ :identify {}\r\n".format(PASSWORD), "UTF-8"))
readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
SERVER = str.rstrip(line)[1:].split()[0]
print(str.rstrip(line))
while 1:
readbuffer = readbuffer + s.recv(1024).decode("UTF-8")
temp = str.split(readbuffer, "\n")
readbuffer = temp.pop()
for line in temp:
print(str.rstrip(line))
message = str.rstrip(line).split(" PRIVMSG {} :".format(CHANNEL))
if "PING" in line: send("PONG :{}\r\n".format(SERVER).encode("utf-8"))
msg = message[-1]
tokens = msg.split()
if msg == "$hello": send("PRIVMSG {} :Hello!\r\n".format(CHANNEL).encode("utf-8"))
if msg == "$ping": send("PRIVMSG {} :Pong!\r\n".format(CHANNEL).encode("utf-8"))
if msg == "$random": send("PRIVMSG {} :{}\r\n".format(CHANNEL, random.randint(0, 100)).encode("utf-8"))
if msg.startswith("$youtube "):
html = requests.get("https://www.youtube.com/results?search_query=" + " ".join(msg.split()[1:])).content
video_ids = re.findall(r"watch\?v=(\S{11})", html.decode())
send("PRIVMSG {} :https://www.youtube.com/watch?v={}\r\n".format(CHANNEL, video_ids[0]).encode("utf-8"))
#if msg.startswith("$google "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, googlesearch.search(" ".join(msg.split()[1:]))[0]).encode("utf-8"))
#if msg.startswith("$wolfram "): send("PRIVMSG {} :{}\r\n".format(CHANNEL, wolfram.get(" ".join(msg.split()[1:]))).encode("utf-8"))
if msg.startswith("$github "):
if tokens[1] == "url": send("PRIVMSG {} :https://github.com/{}/{}\r\n".format(CHANNEL, tokens[2], tokens[3]).encode("utf-8"))
if tokens[1] == "issues": send("PRIVMSG {} :#{}: {}\r\n".format(CHANNEL, tokens[4], github.get_issue_title(tokens[2], tokens[3], tokens[4])).encode("utf-8"))
if msg == "$server": send("PRIVMSG {} :{}\r\n".format(CHANNEL, minecraft.get()).encode("utf-8"))
if msg == "$help": send("PRIVMSG {} :Avalible commands: $hello, $ping, $youtube, $google, $github, $wolfram.\r\n".format(CHANNEL).encode("utf-8"))
if msg.startswith("$help "):
if tokens[1] == "hello": send("PRIVMSG {} :Syntax: $hello Action: Says \"Hello!\".\r\n".format(CHANNEL).encode("utf-8"))
if tokens[1] == "ping":send("PRIVMSG {} :Syntax: $ping Action: Says \"Ping!\".\r\n".format(CHANNEL).encode("utf-8"))
if tokens[1] == "youtube": send("PRIVMSG {} :Syntax: $youtube <keyword> Action: Sends the URL of a YouTube video matching the keyword given.\r\n".format(CHANNEL).encode("utf-8"))
#if tokens[1] == "google": send("PRIVMSG {} :Syntax: $google <keyword> Action: Sends the URL of a google search with the keyword given\r\n".format(CHANNEL).encode("utf-8"))
if tokens[1] == "github": send("PRIVMSG {} :Syntax: $github <topic> <user> <repo> <number> Action: Returns data about a github repo.\r\n".format(CHANNEL).encode("utf-8"))
#if tokens[1] == "wolfram": send("PRIVMSG {} :Syntax: $wolfram <query> Action: Asks Wolfram|Alpha the query given.\r\n".format(CHANNEL).encode("utf-8"))
| 60.615385 | 190 | 0.612183 | [
"MIT"
] | dovegaming/dovenetwork | runtime/bots/irc/main.py | 3,940 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = [
'GetAmiIdsResult',
'AwaitableGetAmiIdsResult',
'get_ami_ids',
]
warnings.warn("""aws.getAmiIds has been deprecated in favor of aws.ec2.getAmiIds""", DeprecationWarning)
@pulumi.output_type
class GetAmiIdsResult:
"""
A collection of values returned by getAmiIds.
"""
def __init__(__self__, executable_users=None, filters=None, id=None, ids=None, name_regex=None, owners=None, sort_ascending=None):
if executable_users and not isinstance(executable_users, list):
raise TypeError("Expected argument 'executable_users' to be a list")
pulumi.set(__self__, "executable_users", executable_users)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if name_regex and not isinstance(name_regex, str):
raise TypeError("Expected argument 'name_regex' to be a str")
pulumi.set(__self__, "name_regex", name_regex)
if owners and not isinstance(owners, list):
raise TypeError("Expected argument 'owners' to be a list")
pulumi.set(__self__, "owners", owners)
if sort_ascending and not isinstance(sort_ascending, bool):
raise TypeError("Expected argument 'sort_ascending' to be a bool")
pulumi.set(__self__, "sort_ascending", sort_ascending)
@property
@pulumi.getter(name="executableUsers")
def executable_users(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "executable_users")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetAmiIdsFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="nameRegex")
def name_regex(self) -> Optional[str]:
return pulumi.get(self, "name_regex")
@property
@pulumi.getter
def owners(self) -> Sequence[str]:
return pulumi.get(self, "owners")
@property
@pulumi.getter(name="sortAscending")
def sort_ascending(self) -> Optional[bool]:
return pulumi.get(self, "sort_ascending")
class AwaitableGetAmiIdsResult(GetAmiIdsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAmiIdsResult(
executable_users=self.executable_users,
filters=self.filters,
id=self.id,
ids=self.ids,
name_regex=self.name_regex,
owners=self.owners,
sort_ascending=self.sort_ascending)
def get_ami_ids(executable_users: Optional[Sequence[str]] = None,
filters: Optional[Sequence[pulumi.InputType['GetAmiIdsFilterArgs']]] = None,
name_regex: Optional[str] = None,
owners: Optional[Sequence[str]] = None,
sort_ascending: Optional[bool] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAmiIdsResult:
"""
Use this data source to get a list of AMI IDs matching the specified criteria.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
ubuntu = aws.ec2.get_ami_ids(filters=[aws.ec2.GetAmiIdsFilterArgs(
name="name",
values=["ubuntu/images/ubuntu-*-*-amd64-server-*"],
)],
owners=["099720109477"])
```
:param Sequence[str] executable_users: Limit search to users with *explicit* launch
permission on the image. Valid items are the numeric account ID or `self`.
:param Sequence[pulumi.InputType['GetAmiIdsFilterArgs']] filters: One or more name/value pairs to filter off of. There
are several valid keys, for a full reference, check out
[describe-images in the AWS CLI reference][1].
:param str name_regex: A regex string to apply to the AMI list returned
by AWS. This allows more advanced filtering not supported from the AWS API.
This filtering is done locally on what AWS returns, and could have a performance
impact if the result is large. It is recommended to combine this with other
options to narrow down the list AWS returns.
:param Sequence[str] owners: List of AMI owners to limit search. At least 1 value must be specified. Valid values: an AWS account ID, `self` (the current account), or an AWS owner alias (e.g. `amazon`, `aws-marketplace`, `microsoft`).
:param bool sort_ascending: Used to sort AMIs by creation time.
"""
pulumi.log.warn("""get_ami_ids is deprecated: aws.getAmiIds has been deprecated in favor of aws.ec2.getAmiIds""")
__args__ = dict()
__args__['executableUsers'] = executable_users
__args__['filters'] = filters
__args__['nameRegex'] = name_regex
__args__['owners'] = owners
__args__['sortAscending'] = sort_ascending
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAmiIds:getAmiIds', __args__, opts=opts, typ=GetAmiIdsResult).value
return AwaitableGetAmiIdsResult(
executable_users=__ret__.executable_users,
filters=__ret__.filters,
id=__ret__.id,
ids=__ret__.ids,
name_regex=__ret__.name_regex,
owners=__ret__.owners,
sort_ascending=__ret__.sort_ascending)
| 39.88125 | 238 | 0.667137 | [
"ECL-2.0",
"Apache-2.0"
] | elad-snyk/pulumi-aws | sdk/python/pulumi_aws/get_ami_ids.py | 6,381 | Python |
# header files
import torch
import torch.nn as nn
import torchvision
import numpy as np
# define network (remember input size: (224 x 224 x 3))
class DenseNet_121(torch.nn.Module):
# define dense block
def dense_block(self, input_channels):
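        # Bottleneck layout: a 1x1 conv to 128 channels followed by a 3x3 conv that adds 32 new feature maps (growth rate 32).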
return torch.nn.Sequential(
torch.nn.Conv2d(input_channels, 128, kernel_size=1, bias=False),
torch.nn.BatchNorm2d(128),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(128, 32, kernel_size=3, padding=1, bias=False),
torch.nn.BatchNorm2d(32),
torch.nn.ReLU(inplace=True)
)
# init function
def __init__(self, num_classes = 2):
super(DenseNet_121, self).__init__()
self.features = torch.nn.Sequential(
torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
torch.nn.BatchNorm2d(64),
torch.nn.ReLU(inplace=True),
torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
# dense block 1 (56 x 56 x 64)
self.dense_block_1_1 = self.dense_block(64)
self.dense_block_1_2 = self.dense_block(96)
self.dense_block_1_3 = self.dense_block(128)
self.dense_block_1_4 = self.dense_block(160)
self.dense_block_1_5 = self.dense_block(192)
self.dense_block_1_6 = self.dense_block(224)
# transition block 1
self.transition_block_1 = torch.nn.Sequential(
torch.nn.Conv2d(256, 128, kernel_size=1, bias=False),
torch.nn.AvgPool2d(kernel_size=2, stride=2)
)
# dense block 2 (28 x 28 x 128)
self.dense_block_2_1 = self.dense_block(128)
self.dense_block_2_2 = self.dense_block(160)
self.dense_block_2_3 = self.dense_block(192)
self.dense_block_2_4 = self.dense_block(224)
self.dense_block_2_5 = self.dense_block(256)
self.dense_block_2_6 = self.dense_block(288)
self.dense_block_2_7 = self.dense_block(320)
self.dense_block_2_8 = self.dense_block(352)
self.dense_block_2_9 = self.dense_block(384)
self.dense_block_2_10 = self.dense_block(416)
self.dense_block_2_11 = self.dense_block(448)
self.dense_block_2_12 = self.dense_block(480)
# transition block 2
self.transition_block_2 = torch.nn.Sequential(
torch.nn.Conv2d(512, 256, kernel_size=1, bias=False),
torch.nn.AvgPool2d(kernel_size=2, stride=2)
)
        # dense block 3 (14 x 14 x 256)
self.dense_block_3_1 = self.dense_block(256)
self.dense_block_3_2 = self.dense_block(288)
self.dense_block_3_3 = self.dense_block(320)
self.dense_block_3_4 = self.dense_block(352)
self.dense_block_3_5 = self.dense_block(384)
self.dense_block_3_6 = self.dense_block(416)
self.dense_block_3_7 = self.dense_block(448)
self.dense_block_3_8 = self.dense_block(480)
self.dense_block_3_9 = self.dense_block(512)
self.dense_block_3_10 = self.dense_block(544)
self.dense_block_3_11 = self.dense_block(576)
self.dense_block_3_12 = self.dense_block(608)
self.dense_block_3_13 = self.dense_block(640)
self.dense_block_3_14 = self.dense_block(672)
self.dense_block_3_15 = self.dense_block(704)
self.dense_block_3_16 = self.dense_block(736)
self.dense_block_3_17 = self.dense_block(768)
self.dense_block_3_18 = self.dense_block(800)
self.dense_block_3_19 = self.dense_block(832)
self.dense_block_3_20 = self.dense_block(864)
self.dense_block_3_21 = self.dense_block(896)
self.dense_block_3_22 = self.dense_block(928)
self.dense_block_3_23 = self.dense_block(960)
self.dense_block_3_24 = self.dense_block(992)
# transition block 3
self.transition_block_3 = torch.nn.Sequential(
torch.nn.Conv2d(1024, 512, kernel_size=1, bias=False),
torch.nn.AvgPool2d(kernel_size=2, stride=2)
)
# dense block 4 (7 x 7 x 512)
self.dense_block_4_1 = self.dense_block(512)
self.dense_block_4_2 = self.dense_block(544)
self.dense_block_4_3 = self.dense_block(576)
self.dense_block_4_4 = self.dense_block(608)
self.dense_block_4_5 = self.dense_block(640)
self.dense_block_4_6 = self.dense_block(672)
self.dense_block_4_7 = self.dense_block(704)
self.dense_block_4_8 = self.dense_block(736)
self.dense_block_4_9 = self.dense_block(768)
self.dense_block_4_10 = self.dense_block(800)
self.dense_block_4_11 = self.dense_block(832)
self.dense_block_4_12 = self.dense_block(864)
self.dense_block_4_13 = self.dense_block(896)
self.dense_block_4_14 = self.dense_block(928)
self.dense_block_4_15 = self.dense_block(960)
self.dense_block_4_16 = self.dense_block(992)
self.avgpool = torch.nn.AdaptiveAvgPool2d(7)
self.classifier = torch.nn.Sequential(
torch.nn.Linear(1024 * 7 * 7, num_classes)
)
def forward(self, x):
x = self.features(x)
# dense block 1
x_1 = self.dense_block_1_1(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_1_2(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_1_3(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_1_4(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_1_5(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_1_6(x)
x = torch.cat([x, x_1], 1)
# transition block 1
x = self.transition_block_1(x)
# dense block 2
x_1 = self.dense_block_2_1(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_2(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_3(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_4(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_5(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_6(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_7(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_8(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_9(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_10(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_11(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_2_12(x)
x = torch.cat([x, x_1], 1)
# transition block 2
x = self.transition_block_2(x)
# dense block 3
x_1 = self.dense_block_3_1(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_2(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_3(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_4(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_5(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_6(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_7(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_8(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_9(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_10(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_11(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_12(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_13(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_14(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_15(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_16(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_17(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_18(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_19(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_20(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_21(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_22(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_23(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_3_24(x)
x = torch.cat([x, x_1], 1)
# transition block 3
x = self.transition_block_3(x)
# dense block 4
x_1 = self.dense_block_4_1(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_2(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_3(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_4(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_5(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_6(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_7(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_8(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_9(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_10(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_11(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_12(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_13(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_14(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_15(x)
x = torch.cat([x, x_1], 1)
x_1 = self.dense_block_4_16(x)
x = torch.cat([x, x_1], 1)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
| 37.582707 | 83 | 0.577873 | [
"MIT"
] | arp95/cnn_architectures_image_classification | models/densenet121.py | 9,997 | Python |
"""Illustrates a method to intercept changes on objects, turning
an UPDATE statement on a single row into an INSERT statement, so that a new
row is inserted with the new data, keeping the old row intact.
This example adds a numerical version_id to the Versioned class as well
as the ability to see which row is the most "current" version.
"""
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import column_property
from sqlalchemy.orm import make_transient
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
class Versioned(object):
# we have a composite primary key consisting of "id"
# and "version_id"
id = Column(Integer, primary_key=True)
version_id = Column(Integer, primary_key=True, default=1)
# optional - add a persisted is_current_version column
is_current_version = Column(Boolean, default=True)
# optional - add a calculated is_current_version column
@classmethod
def __declare_last__(cls):
alias = cls.__table__.alias()
cls.calc_is_current_version = column_property(
select(func.max(alias.c.version_id) == cls.version_id).where(
alias.c.id == cls.id
)
)
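    # Called from the before_flush hook: the old row is marked as no longer current, this instance is detached from its persistent identity, and version_id is bumped so the flush issues an INSERT instead of an UPDATE.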
def new_version(self, session):
# optional - set previous version to have is_current_version=False
old_id = self.id
session.query(self.__class__).filter_by(id=old_id).update(
values=dict(is_current_version=False), synchronize_session=False
)
# make us transient (removes persistent
# identity).
make_transient(self)
# increment version_id, which means we have a new PK.
self.version_id += 1
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
for instance in session.dirty:
if not isinstance(instance, Versioned):
continue
if not session.is_modified(instance, passive=True):
continue
if not attributes.instance_state(instance).has_identity:
continue
# make it transient
instance.new_version(session)
# re-add
session.add(instance)
Base = declarative_base()
engine = create_engine("sqlite://", echo=True)
Session = sessionmaker(engine)
# example 1, simple versioning
class Example(Versioned, Base):
__tablename__ = "example"
data = Column(String)
Base.metadata.create_all(engine)
session = Session()
e1 = Example(id=1, data="e1")
session.add(e1)
session.commit()
e1.data = "e2"
session.commit()
assert (
session.query(
Example.id,
Example.version_id,
Example.is_current_version,
Example.calc_is_current_version,
Example.data,
)
.order_by(Example.id, Example.version_id)
.all()
== ([(1, 1, False, False, "e1"), (1, 2, True, True, "e2")])
)
# example 2, versioning with a parent
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
child_id = Column(Integer)
child_version_id = Column(Integer)
child = relationship("Child", backref=backref("parent", uselist=False))
__table_args__ = (
ForeignKeyConstraint(
["child_id", "child_version_id"], ["child.id", "child.version_id"]
),
)
class Child(Versioned, Base):
__tablename__ = "child"
data = Column(String)
def new_version(self, session):
# expire parent's reference to us
session.expire(self.parent, ["child"])
# create new version
Versioned.new_version(self, session)
# re-add ourselves to the parent. this causes the
# parent foreign key to be updated also
self.parent.child = self
Base.metadata.create_all(engine)
session = Session()
p1 = Parent(child=Child(id=1, data="c1"))
session.add(p1)
session.commit()
p1.child.data = "c2"
session.commit()
assert p1.child_id == 1
assert p1.child.version_id == 2
assert (
session.query(
Child.id,
Child.version_id,
Child.is_current_version,
Child.calc_is_current_version,
Child.data,
)
.order_by(Child.id, Child.version_id)
.all()
== ([(1, 1, False, False, "c1"), (1, 2, True, True, "c2")])
)
| 26.322034 | 78 | 0.683408 | [
"MIT"
] | 418sec/sqlalchemy | examples/versioned_rows/versioned_rows_w_versionid.py | 4,659 | Python |
import numpy as np
from testbed.cluster_env import LraClusterEnv
from testbed.PolicyGradient_CPO import PolicyGradient
params = {
# 'path': "Dynamic_large_100",
# 'path': "Dynamic_large_100_limit10",
# 'number of containers': 81,
'learning rate': 0.015,
'nodes per group': 3,
'number of nodes in the cluster': 27,
'container_limitation per node':8
}
def handle_constraint(observation, NUM_NODES):
observation_original = observation.copy()
mapping_index = []
# TODO: we could add more constraints here
list_check = observation[:, :].sum(1) > params['container_limitation per node'] - 1 # >8
if sum(list_check) == NUM_NODES:
return [],[]
good_index = np.where(list_check == False)[0]
length = len(good_index)
index_replace = 0
for node in range(NUM_NODES):
if list_check[node]: # bad node
# index_this_replace = good_index[np.random.randint(length)]
index_this_replace = good_index[index_replace % length]
index_replace += 1
observation[node] = observation_original[index_this_replace]
mapping_index.append(index_this_replace)
else:
mapping_index.append(node)
observation[node] = observation_original[node]
return observation, mapping_index
class NineNodeAPI():
def __init__(self, path_name, surffix, path_surffix):
"""
parameters set
"""
self.NUM_NODES = params['number of nodes in the cluster']
# self.NUM_CONTAINERS = params['number of containers']
# self.sim = Simulator()
self.env = LraClusterEnv(num_nodes=self.NUM_NODES)
ckpt_path_1 = path_surffix + path_name + "1" + "/model.ckpt"
ckpt_path_2 = path_surffix + path_name + "2" + "/model.ckpt"
ckpt_path_3 = path_surffix + path_name + "3" + "/model.ckpt"
self.nodes_per_group = int(params['nodes per group'])
# self.number_of_node_groups = int(self.NUM_NODES / self.nodes_per_group)
"""
Build Network
"""
self.n_actions = self.nodes_per_group #: 3 nodes per group
self.n_features = int(self.n_actions * (self.env.NUM_APPS + 1 + self.env.NUM_APPS) + 1 + self.env.NUM_APPS)
#: 29
self.RL_1 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '1a')
self.RL_2 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '2a')
self.RL_3 = PolicyGradient(n_actions=self.n_actions, n_features=self.n_features, learning_rate=params['learning rate'], suffix=surffix + '3a')
self.RL_1.restore_session(ckpt_path_1)
self.RL_2.restore_session(ckpt_path_2)
self.RL_3.restore_session(ckpt_path_3)
self.observation_episode_1, self.action_episode_1, self.reward_episode_1, self.safety_episode_1 = [], [], [], []
self.observation_optimal_1, self.action_optimal_1, self.reward_optimal_1, self.safety_optimal_1 = [], [], [], []
self.observation_episode_2, self.action_episode_2, self.reward_episode_2, self.safety_episode_2 = [], [], [], []
self.observation_optimal_2, self.action_optimal_2, self.reward_optimal_2, self.safety_optimal_2 = [], [], [], []
self.observation_episode_3, self.action_episode_3, self.reward_episode_3, self.safety_episode_3 = [], [], [], []
self.observation_optimal_3, self.action_optimal_3, self.reward_optimal_3, self.safety_optimal_3 = [], [], [], []
def batch_data(self, rnd_array):
index_data = []
for i in range(7):
index_data.extend([i] * rnd_array[i])
return rnd_array, index_data
def batch_data_sub(self, rnd_array):
rnd_array = rnd_array.copy()
index_data = []
for i in range(7):
index_data.extend([i] * int(rnd_array[i]))
return rnd_array, index_data
def store_episode_1(self, observations, actions):
self.observation_episode_1.append(observations)
self.action_episode_1.append(actions)
def store_episode_2(self, observations, actions):
self.observation_episode_2.append(observations)
self.action_episode_2.append(actions)
def store_episode_3(self, observations, actions):
self.observation_episode_3.append(observations)
self.action_episode_3.append(actions)
def get_total_tput(self, rnd_array):
# assert sum(rnd_array) == 81
source_batch_, index_data = self.batch_data(rnd_array.astype(int)) # index_data = [0,1,2,0,1,2]
env = LraClusterEnv(num_nodes=self.NUM_NODES)
observation = env.reset().copy() # (9,9)
source_batch = source_batch_.copy()
nodes_per_group = int(params['nodes per group'])
NUM_CONTAINERS = int(sum(rnd_array))
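        # Hierarchical placement: containers are split across 3 top-level groups, then each group across 3 sub-groups, and finally each sub-group across 3 physical nodes (27 in total), with a separate trained policy (RL_1/RL_2/RL_3) at each level.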
"""
Episode
"""
"""
first layer
"""
source_batch_first = source_batch_.copy()
observation_first_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
for inter_episode_index in range(NUM_CONTAINERS):
appid = index_data[inter_episode_index]
source_batch_first[appid] -= 1
observation_first_layer_copy = observation_first_layer.copy()
observation_first_layer_copy[:, appid] += 1
observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy > 9 * 2, axis=1)
observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)
# observation_first_layer_copy = np.append(observation_first_layer_copy, ((observation_first_layer_copy[:, 2] > 0) * (observation_first_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)
observation_first_layer_copy = np.array(observation_first_layer_copy).reshape(1, -1)
observation_first_layer_copy = np.append(observation_first_layer_copy, appid).reshape(1, -1)
observation_first_layer_copy = np.append(observation_first_layer_copy, np.array(source_batch_first)).reshape(1, -1)
action_1, prob_weights = self.RL_1.choose_action_determine(observation_first_layer_copy.copy())
observation_first_layer[action_1, appid] += 1
# self.store_episode_1(observation_first_layer_copy, action_1)
"""
second layer
"""
observation_second_layer_aggregation = np.empty([0, env.NUM_APPS], int) # 9*20
number_cont_second_layer = []
for second_layer_index in range(nodes_per_group):
rnd_array = observation_first_layer[second_layer_index].copy()
source_batch_second, index_data = self.batch_data_sub(rnd_array)
observation_second_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
NUM_CONTAINERS_second = sum(source_batch_second)
number_cont_second_layer.append(NUM_CONTAINERS_second)
for inter_episode_index in range(NUM_CONTAINERS_second):
appid = index_data[inter_episode_index]
source_batch_second[appid] -= 1
observation_second_layer_copy = observation_second_layer.copy()
observation_second_layer_copy[:, appid] += 1
observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy > 3 * 2, axis=1)
observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)
# observation_second_layer_copy = np.append(observation_second_layer_copy, ((observation_second_layer_copy[:, 2] > 0) * (observation_second_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)
observation_second_layer_copy = np.array(observation_second_layer_copy).reshape(1, -1)
observation_second_layer_copy = np.append(observation_second_layer_copy, appid).reshape(1, -1)
observation_second_layer_copy = np.append(observation_second_layer_copy, np.array(source_batch_second)).reshape(1, -1)
action_2, prob_weights = self.RL_2.choose_action_determine(observation_second_layer_copy.copy())
observation_second_layer[action_2, appid] += 1
# self.store_episode_2(observation_second_layer_copy, action_2)
observation_second_layer_aggregation = np.append(observation_second_layer_aggregation, observation_second_layer, 0)
"""
third layer
"""
observation_third_layer_aggregation = np.empty([0, env.NUM_APPS], int) # 9*20
number_cont_third_layer = []
for third_layer_index in range(nodes_per_group * nodes_per_group):
rnd_array = observation_second_layer_aggregation[third_layer_index].copy()
source_batch_third, index_data = self.batch_data_sub(rnd_array)
observation_third_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
NUM_CONTAINERS_third = sum(source_batch_third)
number_cont_third_layer.append(NUM_CONTAINERS_third)
for inter_episode_index in range(NUM_CONTAINERS_third):
appid = index_data[inter_episode_index]
source_batch_third[appid] -= 1
observation_third_layer_copy = observation_third_layer.copy()
observation_third_layer_copy[:, appid] += 1
observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy > 1 * 2, axis=1)
observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy.sum(axis=1).reshape(nodes_per_group, 1), axis=1)
# observation_third_layer_copy = np.append(observation_third_layer_copy, ((observation_third_layer_copy[:, 2] > 0) * (observation_third_layer_copy[:, 3] > 0)).reshape(nodes_per_group, 1), axis=1)
observation_third_layer_copy = np.array(observation_third_layer_copy).reshape(1, -1)
observation_third_layer_copy = np.append(observation_third_layer_copy, appid).reshape(1, -1)
observation_third_layer_copy = np.append(observation_third_layer_copy, np.array(source_batch_third)).reshape(1, -1)
action_3, prob_weights = self.RL_3.choose_action_determine(observation_third_layer_copy.copy())
observation_third_layer[action_3, appid] += 1
# self.store_episode_3(observation_third_layer_copy, action_3)
observation_third_layer_aggregation = np.append(observation_third_layer_aggregation, observation_third_layer, 0)
"""
After an entire allocation, calculate total throughput, reward
"""
env.state = observation_third_layer_aggregation.copy()
assert sum(sum(env.state)) == NUM_CONTAINERS
assert (env.state.sum(0) == source_batch_).all()
"""
After an entire allocation, calculate total throughput, reward
"""
# state = env.state
# assert sum(sum(self.env.state)) == 81
return env.state
| 47.34728 | 215 | 0.680983 | [
"Apache-2.0"
] | George-RL-based-container-sche/George | testbed/SubScheduler.py | 11,316 | Python |
# Advent of Code 2021 - Day: 24
# Imports (Always imports data based on the folder and file name)
from aocd import data, submit
def solve(lines):
# We need to simply find all the pairs of numbers, i.e. the numbers on lines 6 and 16 and store them.
pairs = [(int(lines[i * 18 + 5][6:]), int(lines[i * 18 + 15][6:])) for i in range(14)]
# Once getting the pairs we will need a stack and a map to store the pairs, as well constraints.
stack = []
constraints = {}
# Enumerate helps because we can get the index of the pair at the same time.
for i, (a, b) in enumerate(pairs):
# If (line 6) is positive we need to add line 16 and index to stack, else pop the last element from the stack and add it to constraints.
if a > 0:
stack.append((i, b))
else:
k, bk = stack.pop()
constraints[i] = (k, bk + a)
    # At this point each constraint is stored at the index it affects and can be used to find the minimum or maximum digit at that index in the answer.
max_ans = {}
min_ans = {}
for i, (k, d) in constraints.items():
max_ans[i] = min(9, 9 + d)
max_ans[k] = min(9, 9 - d)
min_ans[i] = max(1, 1 + d)
min_ans[k] = max(1, 1 - d)
p1 = "".join(str(max_ans[i]) for i in range(14))
p2 = "".join(str(min_ans[i]) for i in range(14))
print("Star 1:", p1)
print("Star 2:", p2)
submit(p1, part="a", day=24, year=2021)
submit(p2, part="b", day=24, year=2021)
# Solution
def main():
solve(data.splitlines())
# Call the main function.
if __name__ == '__main__':
    main()
| 34.409091 | 171 | 0.655218 | [
"Unlicense"
] | Azurealistic/Winter | Solutions/2021/24.py | 1,514 | Python |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Meta tests for mappers.
The test checks the output of the swapper against a ground truth DAG (one for each
test/swapper) saved as a QASM file (in `test/python/qasm/`). If they need
to be regenerated, the DAG candidate is compiled and run in a simulator and
the count is checked before being saved. This happens with (in the root
directory):
> python -m test.python.transpiler.test_mappers regenerate
To make a new swapper pass go through all the common tests, create a new class inside the file
`path/to/test_mappers.py` that:
* is named starting with ``Tests...``
* inherits from ``SwapperCommonTestCases, QiskitTestCase``
* overwrites the required attribute ``pass_class``
For example::
class TestsSomeSwap(SwapperCommonTestCases, QiskitTestCase):
pass_class = SomeSwap # The pass class
additional_args = {'seed_transpiler': 42} # In case SomeSwap.__init__ requires
# additional arguments
To **add a test for all the swappers**, add a new method ``test_foo`` to the
``SwapperCommonTestCases`` class:
* defining the following required ``self`` attributes: ``self.counts``,
``self.shots``, ``self.delta``. They are required for the regeneration of the
ground truth.
* use the ``self.assertResult`` assertion to compare against (and, when requested,
  regenerate) the ground truth.
* explicitly set a unique ``name`` of the ``QuantumCircuit``, as it is used
for the name of the QASM file of the ground truth.
For example::
def test_a_common_test(self):
        self.counts = {'000': 512, '110': 512}  # The expected count for this circuit
self.shots = 1024 # Shots to run in the backend.
self.delta = 5 # This is delta for the AlmostEqual during
# the count check
coupling_map = [[0, 1], [0, 2]] # The coupling map for this specific test
qr = QuantumRegister(3, 'q') #
cr = ClassicalRegister(3, 'c') # Set the circuit to test
circuit = QuantumCircuit(qr, cr, # and don't forget to put a name
name='some_name') # (it will be used to save the QASM
circuit.h(qr[1]) #
circuit.cx(qr[1], qr[2]) #
circuit.measure(qr, cr) #
result = transpile(circuit, self.create_backend(), coupling_map=coupling_map,
pass_manager=self.create_passmanager(coupling_map))
self.assertResult(result, circuit)
"""
# pylint: disable=attribute-defined-outside-init
import unittest
import sys
import os
from qiskit import execute
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit, BasicAer
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import BasicSwap, LookaheadSwap, StochasticSwap, SabreSwap
from qiskit.transpiler.passes import SetLayout
from qiskit.transpiler import CouplingMap, Layout
from qiskit.test import QiskitTestCase
DIRNAME = QiskitTestCase._get_resource_path('qasm')
class CommonUtilitiesMixin:
"""Utilities for meta testing.
Subclasses should redefine the ``pass_class`` argument, with a Swap Mapper
class.
Note: This class assumes that the subclass is also inheriting from
``QiskitTestCase``, and it uses ``QiskitTestCase`` methods directly.
"""
regenerate_expected = False
seed_simulator = 42
seed_transpiler = 42
additional_args = {}
pass_class = None
def create_passmanager(self, coupling_map, initial_layout=None):
"""Returns a PassManager using self.pass_class(coupling_map, initial_layout)"""
passmanager = PassManager()
if initial_layout:
passmanager.append(SetLayout(Layout(initial_layout)))
# pylint: disable=not-callable
passmanager.append(self.pass_class(CouplingMap(coupling_map), **self.additional_args))
return passmanager
def create_backend(self):
"""Returns a Backend."""
return BasicAer.get_backend('qasm_simulator')
def generate_ground_truth(self, transpiled_result, filename):
"""Generates the expected result into a file.
Checks if transpiled_result matches self.counts by running in a backend
(self.create_backend()). That's saved in a QASM in filename.
Args:
transpiled_result (DAGCircuit): The DAGCircuit to execute.
filename (string): Where the QASM is saved.
"""
sim_backend = self.create_backend()
job = execute(transpiled_result, sim_backend, seed_simulator=self.seed_simulator,
seed_transpiler=self.seed_transpiler, shots=self.shots)
self.assertDictAlmostEqual(self.counts, job.result().get_counts(), delta=self.delta)
transpiled_result.qasm(formatted=False, filename=filename)
def assertResult(self, result, circuit):
"""Fetches the QASM in circuit.name file and compares it with result."""
qasm_name = '%s_%s.qasm' % (type(self).__name__, circuit.name)
filename = os.path.join(DIRNAME, qasm_name)
if self.regenerate_expected:
# Run result in backend to test that is valid.
self.generate_ground_truth(result, filename)
expected = QuantumCircuit.from_qasm_file(filename)
self.assertEqual(result, expected)
class SwapperCommonTestCases(CommonUtilitiesMixin):
"""Tests that are run in several mappers.
The tests here will be run in several mappers. When adding a test, please
ensure that the test:
    * defines ``self.counts``, ``self.shots``, ``self.delta``.
    * uses the ``self.assertResult`` assertion to compare against (and, when requested,
      regenerate) the ground truth.
* explicitly sets a unique ``name`` of the ``QuantumCircuit``.
See also ``CommonUtilitiesMixin`` and the module docstring.
"""
def test_a_cx_to_map(self):
"""A single CX needs to be remapped.
q0:----------m-----
|
q1:-[H]-(+)--|-m---
| | |
q2:------.---|-|-m-
| | |
c0:----------.-|-|-
c1:------------.-|-
c2:--------------.-
CouplingMap map: [1]<-[0]->[2]
expected count: '000': 50%
'110': 50%
"""
self.counts = {'000': 512, '110': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [0, 2]]
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
circuit = QuantumCircuit(qr, cr, name='a_cx_to_map')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.measure(qr, cr)
result = self.create_passmanager(coupling_map).run(circuit)
self.assertResult(result, circuit)
def test_initial_layout(self):
"""Using a non-trivial initial_layout.
q3:----------------m--
q0:----------m-----|--
| |
q1:-[H]-(+)--|-m---|--
| | | |
q2:------.---|-|-m-|--
| | | |
c0:----------.-|-|-|--
c1:------------.-|-|--
c2:--------------.-|--
c3:----------------.--
CouplingMap map: [1]<-[0]->[2]->[3]
expected count: '000': 50%
'110': 50%
"""
self.counts = {'0000': 512, '0110': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [0, 2], [2, 3]]
qr = QuantumRegister(4, 'q')
cr = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(qr, cr, name='initial_layout')
circuit.h(qr[1])
circuit.cx(qr[1], qr[2])
circuit.measure(qr, cr)
layout = {qr[3]: 0, qr[0]: 1, qr[1]: 2, qr[2]: 3}
result = self.create_passmanager(coupling_map, layout).run(circuit)
self.assertResult(result, circuit)
def test_handle_measurement(self):
"""Handle measurement correctly.
q0:--.-----(+)-m-------
| | |
q1:-(+)-(+)-|--|-m-----
| | | |
q2:------|--|--|-|-m---
| | | | |
q3:-[H]--.--.--|-|-|-m-
| | | |
c0:------------.-|-|-|-
c1:--------------.-|-|-
c2:----------------.-|-
c3:------------------.-
CouplingMap map: [0]->[1]->[2]->[3]
expected count: '0000': 50%
'1011': 50%
"""
self.counts = {'1011': 512, '0000': 512}
self.shots = 1024
self.delta = 5
coupling_map = [[0, 1], [1, 2], [2, 3]]
qr = QuantumRegister(4, 'q')
cr = ClassicalRegister(4, 'c')
circuit = QuantumCircuit(qr, cr, name='handle_measurement')
circuit.h(qr[3])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[3], qr[1])
circuit.cx(qr[3], qr[0])
circuit.measure(qr, cr)
result = self.create_passmanager(coupling_map).run(circuit)
self.assertResult(result, circuit)
class TestsBasicSwap(SwapperCommonTestCases, QiskitTestCase):
"""Test SwapperCommonTestCases using BasicSwap."""
pass_class = BasicSwap
class TestsLookaheadSwap(SwapperCommonTestCases, QiskitTestCase):
"""Test SwapperCommonTestCases using LookaheadSwap."""
pass_class = LookaheadSwap
class TestsStochasticSwap(SwapperCommonTestCases, QiskitTestCase):
"""Test SwapperCommonTestCases using StochasticSwap."""
pass_class = StochasticSwap
additional_args = {'seed': 0}
class TestsSabreSwap(SwapperCommonTestCases, QiskitTestCase):
"""Test SwapperCommonTestCases using SabreSwap."""
pass_class = SabreSwap
additional_args = {'seed': 0}
if __name__ == '__main__':
if len(sys.argv) >= 2 and sys.argv[1] == 'regenerate':
CommonUtilitiesMixin.regenerate_expected = True
del sys.argv[1]
unittest.main()
| 35.394649 | 94 | 0.587452 | [
"Apache-2.0"
] | 7338/qiskit-terra | test/python/transpiler/test_mappers.py | 10,583 | Python |
from testutils import assert_raises
try:
b" \xff".decode("ascii")
except UnicodeDecodeError as e:
assert e.start == 3
assert e.end == 4
else:
assert False, "should have thrown UnicodeDecodeError"
assert_raises(UnicodeEncodeError, "¿como estás?".encode, "ascii")
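# A valid str must survive an encode/decode round trip with the same codec unchanged.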
def round_trip(s, encoding="utf-8"):
encoded = s.encode(encoding)
decoded = encoded.decode(encoding)
assert s == decoded
round_trip("👺♦ 𝐚Şđƒ ☆☝")
round_trip("☢🐣 ᖇ𝓤𝕊тⓟ𝕐𝕥卄σ𝔫 ♬👣")
round_trip("💀👌 ק𝔂tℍⓞ𝓷 3 🔥👤")
| 24.238095 | 66 | 0.667976 | [
"MIT"
] | JesterOrNot/RustPython | tests/snippets/encoding.py | 584 | Python |
from ..utils.core import concatenate
class StreamList(list):
"""Class to replace a basic list for streamed products
"""
def __init__(self, product):
if isinstance(product, list):
super(StreamList, self).__init__(product)
else:
super(StreamList, self).__init__([product])
if len(self) > 10000:
raise ValueError("StreamList can't be longer than 10000 because the filenames for caching are not adequate")
self._cached_aggregate = None
    def aggregate(self):
        # Concatenate the streamed products once and reuse the cached result afterwards.
        if self._cached_aggregate is None:
            self._cached_aggregate = concatenate(self)
        return self._cached_aggregate
| 28.291667 | 120 | 0.639175 | [
"MIT"
] | guitargeek/geeksw | geeksw/framework/stream.py | 679 | Python |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from python_api.users import models as user_models
from python_api.images import models as image_models
@python_2_unicode_compatible
class Notification(image_models.TimeStampedModel):
TYPE_CHOICES = (
('like', 'Like'),
('comment', 'Comment'),
('follow', 'Follow')
)
creator = models.ForeignKey(user_models.User, related_name='creator')
to = models.ForeignKey(user_models.User, related_name='to')
notification_type = models.CharField(max_length=20, choices=TYPE_CHOICES)
image = models.ForeignKey(image_models.Image, null=True, blank=True)
comment = models.TextField(null=True, blank=True)
    class Meta:
ordering = ['-created_at']
def __str__(self):
return 'From {} {}'.format(self.creator, self.to)
| 34.72 | 77 | 0.717742 | [
"MIT"
] | hyecheon/python_api | python_api/notifications/models.py | 868 | Python |
"""
Django settings for django_i18n_example project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from os.path import join
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3pykyaqk#*wgwp%$3l@9_az$_9m^-3z3xkbcm!fitj9w!1c802'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
    # Order matters: LocaleMiddleware must come after SessionMiddleware and before CommonMiddleware
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_i18n_example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'django_i18n_example', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_i18n_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGES = (
('en-us', 'English (US)'),
    ('de', 'Deutsch'),
('ar', 'عربى'),
)
LANGUAGE_CODE = 'en-us'
LOCALE_PATHS = [
join(BASE_DIR, 'django_i18n_example', 'locale'),
]
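# Message catalogs under LOCALE_PATHS are created with `django-admin makemessages`
# and compiled with `django-admin compilemessages`.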
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Enable our static JS file serving
STATICFILES_DIRS = (
join(BASE_DIR, 'django_i18n_example', "static"),
)
| 25.219858 | 91 | 0.691507 | [
"MIT"
] | localizely/django_i18n_example | django_i18n_example/settings.py | 3,560 | Python |
#!/usr/bin/python
# coding:utf-8
'''
Created on 2017-04-06
Update on 2017-11-17
Author: Peter/ApacheCN-xy/片刻
GitHub: https://github.com/apachecn/MachineLearning
'''
import sys
from numpy import mat, mean, power
'''
This mapper reads the input line by line and builds the corresponding list of floats,
then takes the length of that array and builds a NumPy matrix. It squares all the values
and finally emits the mean and the mean of the squares; these values are later used to
compute the global mean and variance.
    Args:
        file  input data
    Return:
'''
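# A reducer can merge the partial results: given count n_i, mean m_i and mean-of-squares s_i
# from each mapper, the global mean is sum(n_i*m_i)/sum(n_i), the global mean of squares is
# sum(n_i*s_i)/sum(n_i), and the variance follows as E[x^2] - E[x]^2.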
def read_input(file):
for line in file:
        yield line.rstrip()  # yield lines one at a time as a generator to save memory
input = read_input(sys.stdin)            # create an iterator over the input lines
input = [float(line) for line in input]  # convert each line to a float
numInputs = len(input)                   # number of values, i.e. the number of input lines
input = mat(input)                       # convert the list into a NumPy matrix
sqInput = power(input, 2)                # square every value in the matrix
# Emit the number of values, the mean of the values, and the mean of the squared values.
# The first line goes to standard output and becomes the reducer's input.
# The second line goes to standard error as a status report to the master node, showing
# that this node is still alive. Note: reporting on standard error is a good habit --
# if a task produces no output for 10 minutes, Hadoop will terminate it.
print("%d\t%f\t%f" % (numInputs, mean(input), mean(sqInput)))  # emit count, mean, mean of squares
print("map report: still alive", file=sys.stderr)
| 26.512195 | 78 | 0.677093 | [
"Apache-2.0"
] | cherisyu/ML_in_Action | ML-in-Action/MachineLearning-dev/src/py3.x/ML/15.BigData_MapReduce/mrMeanMapper.py | 1,737 | Python |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 4 17:07:18 2016
@author: sshank
"""
# Print out the required annotations at the moment... change to put into MySQL
from Bio.Seq import Seq
from Bio import AlignIO
from Bio.SeqRecord import SeqRecord
from argparse import ArgumentParser
parser = ArgumentParser()
rst_help = 'Path to parsed RST file (created with parse_rst.py).'
parser.add_argument('-r', '--rst', metavar='RST', help=rst_help, dest='rst')
input_help = 'Path to input fasta file (aligned).'
parser.add_argument('-i', '--input', metavar='INPUT', help=input_help, dest='input')
args = parser.parse_args()
rst_filename = args.rst
input_filename = args.input
descendent_sequence = ''
ancestral_sequence = ''
descendent_annotations = []
descendent_changes = []
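# Annotation codes recorded per descendent codon: 0 = no change (or missing data),
# 1 = synonymous substitution, 2 = nonsynonymous substitution.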
with open(rst_filename, 'r') as file:
for line in file:
split = line.split()
descendent_codon = split[6]
ancestral_codon = split[16]
if descendent_codon != '---':
descendent_amino_acid = Seq(descendent_codon).translate()
descendent_sequence += str(descendent_amino_acid)
if descendent_codon == ancestral_codon or ancestral_codon == '---':
# No change or missing information
descendent_annotations.append(0)
descendent_changes.append('-')
else:
ancestral_amino_acid = Seq(ancestral_codon).translate()
if descendent_amino_acid == ancestral_amino_acid:
# Synonymous change
descendent_annotations.append(1)
change = ancestral_codon + '->' + descendent_codon
descendent_changes.append(change)
else:
# Nonsynonymous change
descendent_annotations.append(2)
change = str(ancestral_amino_acid) + '->' + str(descendent_amino_acid)
descendent_changes.append(change)
taed_descendent = SeqRecord(descendent_sequence, id='taed_descendent')
pdb_annotations = []
pdb_changes = []
alignment = AlignIO.read(input_filename, 'fasta')
d_index = 0
p_index = 0
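# Walk the pairwise alignment column by column and carry the per-codon annotations over
# to the PDB sequence positions; PDB residues aligned to a gap get the "no change" code.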
for k in range(alignment.get_alignment_length()):
descendent_amino_acid, pdb_amino_acid = alignment[:, k]
if pdb_amino_acid != '-' and descendent_amino_acid != '-':
# There is a chance that something happened... append and increment both
pdb_annotations.append(descendent_annotations[d_index])
pdb_changes.append(descendent_changes[d_index])
p_index += 1
d_index += 1
else:
if pdb_amino_acid != '-':
pdb_annotations.append(0)
pdb_changes.append('-')
p_index += 1
if descendent_amino_acid != '-':
d_index += 1
print(','.join([str(i) for i in pdb_annotations]))
print('\n')
print("'" + "','".join([str(i) for i in pdb_changes])+ "'")
| 36.2875 | 90 | 0.635549 | [
"MIT"
] | stephenshank/taed-pv | create_pdb_annotations.py | 2,903 | Python |
#!/usr/bin/env python
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension
cmdclass = {}
cmdclass['build_ext'] = BuildExtension
import setuptools
ext_modules = [
CppExtension(name='torch_blocksparse_cpp_utils',
sources=['csrc/utils.cpp'],
extra_compile_args={'cxx': ['-O2',
'-fopenmp']})
]
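# BuildExtension from torch.utils.cpp_extension supplies the compiler and ABI flags matching
# the installed PyTorch; compiling csrc/utils.cpp additionally needs a C++ toolchain with OpenMP.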
setuptools.setup(
name = 'torch-blocksparse',
version = '1.1.1',
description = 'Block-sparse primitives for PyTorch',
author = 'Philippe Tillet',
maintainer = 'Philippe Tillet',
maintainer_email = '[email protected]',
install_requires = ['triton', 'torch'],
url = 'https://github.com/ptillet/torch-blocksparse',
test_suite = 'nose.collector',
tests_require = ['nose', 'parameterized'],
license = 'MIT',
packages = find_packages(exclude=["csrc"]),
ext_modules = ext_modules,
cmdclass = cmdclass
)
| 30 | 70 | 0.589189 | [
"MIT"
] | akhti/torch-blocksparse | setup.py | 1,110 | Python |
[file content masked in the source dump; no recoverable text] | 26.042386 | 114 | 0.734583 | [
"MIT"
] | dnaextrim/django_adminlte_x | adminlte/static/plugins/datatables/extensions/FixedHeader/examples/simple.html.py | 16,589 | Python |
# Copyright 2012-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from .. import mparser
from .. import environment
from .. import coredata
from .. import dependencies
from .. import mlog
from .. import build
from .. import optinterpreter
from .. import compilers
from .. import envconfig
from ..wrap import wrap, WrapMode
from .. import mesonlib
from ..mesonlib import MesonBugException, HoldableObject, FileMode, MachineChoice, OptionKey, listify, extract_as_list, has_path_sep
from ..programs import ExternalProgram, NonExistingExternalProgram
from ..dependencies import Dependency
from ..depfile import DepFile
from ..interpreterbase import ContainerTypeInfo, InterpreterBase, KwargInfo, typed_kwargs, typed_pos_args
from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, noArgsFlattening, noSecondLevelHolderResolving, unholder_return
from ..interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from ..interpreterbase import Disabler, disablerIfNotFound
from ..interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs, FeatureDeprecatedKwargs
from ..interpreterbase import ObjectHolder
from ..modules import ExtensionModule, ModuleObject, MutableModuleObject, NewExtensionModule, NotFoundExtensionModule
from ..cmake import CMakeInterpreter
from ..backend.backends import ExecutableSerialisation
from . import interpreterobjects as OBJ
from . import compiler as compilerOBJ
from .mesonmain import MesonMain
from .dependencyfallbacks import DependencyFallbacksHolder
from .interpreterobjects import (
SubprojectHolder,
Test,
RunProcess,
extract_required_kwarg,
extract_search_dirs,
NullSubprojectInterpreter,
)
from .type_checking import (
COMMAND_KW,
CT_BUILD_ALWAYS,
CT_BUILD_ALWAYS_STALE,
CT_BUILD_BY_DEFAULT,
CT_INPUT_KW,
CT_INSTALL_DIR_KW,
CT_OUTPUT_KW,
DEFAULT_OPTIONS,
DEPENDS_KW,
DEPEND_FILES_KW,
DEPFILE_KW,
DISABLER_KW,
ENV_KW,
ENV_METHOD_KW,
ENV_SEPARATOR_KW,
INSTALL_KW,
INSTALL_MODE_KW,
CT_INSTALL_TAG_KW,
INSTALL_TAG_KW,
LANGUAGE_KW,
NATIVE_KW, OVERRIDE_OPTIONS_KW,
REQUIRED_KW,
NoneType,
in_set_validator,
env_convertor_with_method
)
from . import primitives as P_OBJ
from pathlib import Path
import os
import shutil
import uuid
import re
import stat
import collections
import typing as T
import textwrap
import importlib
if T.TYPE_CHECKING:
import argparse
from typing_extensions import Literal
from . import kwargs
from ..backend.backends import Backend
from ..interpreterbase.baseobjects import InterpreterObject, TYPE_var, TYPE_kwargs
from ..programs import OverrideProgram
# Input source types passed to Targets
SourceInputs = T.Union[mesonlib.File, build.GeneratedList, build.BuildTarget, build.BothLibraries,
build.CustomTargetIndex, build.CustomTarget, build.GeneratedList,
build.ExtractedObjects, str]
# Input source types passed to the build.Target classes
SourceOutputs = T.Union[mesonlib.File, build.GeneratedList,
build.BuildTarget, build.CustomTargetIndex, build.CustomTarget,
build.ExtractedObjects, build.GeneratedList, build.StructuredSources]
def _project_version_validator(value: T.Union[T.List, str, mesonlib.File, None]) -> T.Optional[str]:
if isinstance(value, list):
if len(value) != 1:
return 'when passed as array must have a length of 1'
elif not isinstance(value[0], mesonlib.File):
return 'when passed as array must contain a File'
return None
def stringifyUserArguments(args: T.List[T.Any], quote: bool = False) -> str:
if isinstance(args, list):
return '[%s]' % ', '.join([stringifyUserArguments(x, True) for x in args])
elif isinstance(args, dict):
return '{%s}' % ', '.join(['{} : {}'.format(stringifyUserArguments(k, True), stringifyUserArguments(v, True)) for k, v in args.items()])
elif isinstance(args, bool):
return 'true' if args else 'false'
elif isinstance(args, int):
return str(args)
elif isinstance(args, str):
return f"'{args}'" if quote else args
raise InvalidArguments('Function accepts only strings, integers, bools, lists, dictionaries and lists thereof.')
class Summary:
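    """Collects the key/value sections registered through the summary() function and
    pretty-prints them once configuration finishes."""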
def __init__(self, project_name: str, project_version: str):
self.project_name = project_name
self.project_version = project_version
self.sections = collections.defaultdict(dict)
self.max_key_len = 0
def add_section(self, section: str, values: T.Dict[str, T.Any], bool_yn: bool,
list_sep: T.Optional[str], subproject: str) -> None:
for k, v in values.items():
if k in self.sections[section]:
                raise InterpreterException(f'Summary section {section!r} already has key {k!r}')
formatted_values = []
for i in listify(v):
if isinstance(i, bool) and bool_yn:
formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
elif isinstance(i, (str, int, bool)):
formatted_values.append(str(i))
elif isinstance(i, (ExternalProgram, Dependency)):
FeatureNew.single_use('dependency or external program in summary', '0.57.0', subproject)
formatted_values.append(i.summary_value())
elif isinstance(i, coredata.UserOption):
FeatureNew.single_use('feature option in summary', '0.58.0', subproject)
formatted_values.append(i.printable_value())
else:
m = 'Summary value in section {!r}, key {!r}, must be string, integer, boolean, dependency or external program'
raise InterpreterException(m.format(section, k))
self.sections[section][k] = (formatted_values, list_sep)
self.max_key_len = max(self.max_key_len, len(k))
def dump(self):
mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
for section, values in self.sections.items():
mlog.log('') # newline
if section:
mlog.log(' ', mlog.bold(section))
for k, v in values.items():
v, list_sep = v
padding = self.max_key_len - len(k)
end = ' ' if v else ''
mlog.log(' ' * 3, k + ' ' * padding + ':', end=end)
indent = self.max_key_len + 6
self.dump_value(v, list_sep, indent)
mlog.log('') # newline
def dump_value(self, arr, list_sep, indent):
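        # Print one entry's values, wrapping the list onto continuation lines aligned under
        # the value column whenever the current terminal width would be exceeded.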
lines_sep = '\n' + ' ' * indent
if list_sep is None:
mlog.log(*arr, sep=lines_sep)
return
max_len = shutil.get_terminal_size().columns
line = []
line_len = indent
lines_sep = list_sep.rstrip() + lines_sep
for v in arr:
v_len = len(v) + len(list_sep)
if line and line_len + v_len > max_len:
mlog.log(*line, sep=list_sep, end=lines_sep)
line_len = indent
line = []
line.append(v)
line_len += v_len
mlog.log(*line, sep=list_sep)
known_library_kwargs = (
build.known_shlib_kwargs |
build.known_stlib_kwargs
)
known_build_target_kwargs = (
known_library_kwargs |
build.known_exe_kwargs |
build.known_jar_kwargs |
{'target_type'}
)
TEST_KWARGS: T.List[KwargInfo] = [
KwargInfo('args', ContainerTypeInfo(list, (str, mesonlib.File, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)),
listify=True, default=[]),
KwargInfo('should_fail', bool, default=False),
KwargInfo('timeout', int, default=30),
KwargInfo('workdir', (str, NoneType), default=None,
validator=lambda x: 'must be an absolute path' if not os.path.isabs(x) else None),
KwargInfo('protocol', str,
default='exitcode',
validator=in_set_validator({'exitcode', 'tap', 'gtest', 'rust'}),
since_values={'gtest': '0.55.0', 'rust': '0.57.0'}),
KwargInfo('priority', int, default=0, since='0.52.0'),
# TODO: env needs reworks of the way the environment variable holder itself works probably
ENV_KW,
DEPENDS_KW.evolve(since='0.46.0'),
KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']), # yes, a list of empty string
KwargInfo('verbose', bool, default=False, since='0.62.0'),
]
permitted_dependency_kwargs = {
'allow_fallback',
'cmake_args',
'cmake_module_path',
'cmake_package_version',
'components',
'default_options',
'fallback',
'include_type',
'language',
'main',
'method',
'modules',
'native',
'not_found_message',
'optional_modules',
'private_headers',
'required',
'static',
'version',
}
implicit_check_false_warning = """You should add the boolean check kwarg to the run_command call.
It currently defaults to false,
but it will default to true in future releases of meson.
See also: https://github.com/mesonbuild/meson/issues/9300"""
class Interpreter(InterpreterBase, HoldableObject):
def __init__(
self,
_build: build.Build,
backend: T.Optional[Backend] = None,
subproject: str = '',
subdir: str = '',
subproject_dir: str = 'subprojects',
default_project_options: T.Optional[T.Dict[OptionKey, str]] = None,
mock: bool = False,
ast: T.Optional[mparser.CodeBlockNode] = None,
is_translated: bool = False,
user_defined_options: T.Optional['argparse.Namespace'] = None,
) -> None:
super().__init__(_build.environment.get_source_dir(), subdir, subproject)
self.active_projectname = ''
self.build = _build
self.environment = self.build.environment
self.coredata = self.environment.get_coredata()
self.backend = backend
self.summary: T.Dict[str, 'Summary'] = {}
self.modules: T.Dict[str, NewExtensionModule] = {}
# Subproject directory is usually the name of the subproject, but can
# be different for dependencies provided by wrap files.
self.subproject_directory_name = subdir.split(os.path.sep)[-1]
self.subproject_dir = subproject_dir
self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
if not mock and ast is None:
self.load_root_meson_file()
self.sanity_check_ast()
elif ast is not None:
self.ast = ast
self.sanity_check_ast()
self.builtin.update({'meson': MesonMain(self.build, self)})
self.generators: T.List[build.Generator] = []
self.processed_buildfiles = set() # type: T.Set[str]
self.project_args_frozen = False
self.global_args_frozen = False # implies self.project_args_frozen
self.subprojects: T.Dict[str, SubprojectHolder] = {}
self.subproject_stack: T.List[str] = []
self.configure_file_outputs: T.Dict[str, int] = {}
# Passed from the outside, only used in subprojects.
if default_project_options:
self.default_project_options = default_project_options.copy()
else:
self.default_project_options = {}
self.project_default_options: T.Dict[OptionKey, str] = {}
self.build_func_dict()
self.build_holder_map()
self.user_defined_options = user_defined_options
# build_def_files needs to be defined before parse_project is called
#
# For non-meson subprojects, we'll be using the ast. Even if it does
# exist we don't want to add a dependency on it, it's autogenerated
# from the actual build files, and is just for reference.
self.build_def_files: mesonlib.OrderedSet[str] = mesonlib.OrderedSet()
build_filename = os.path.join(self.subdir, environment.build_filename)
if not is_translated:
self.build_def_files.add(build_filename)
if not mock:
self.parse_project()
self._redetect_machines()
def __getnewargs_ex__(self) -> T.Tuple[T.Tuple[object], T.Dict[str, object]]:
raise MesonBugException('This class is unpicklable')
def _redetect_machines(self) -> None:
# Re-initialize machine descriptions. We can do a better job now because we
# have the compilers needed to gain more knowledge, so wipe out old
# inference and start over.
machines = self.build.environment.machines.miss_defaulting()
machines.build = environment.detect_machine_info(self.coredata.compilers.build)
self.build.environment.machines = machines.default_missing()
assert self.build.environment.machines.build.cpu is not None
assert self.build.environment.machines.host.cpu is not None
assert self.build.environment.machines.target.cpu is not None
self.builtin['build_machine'] = \
OBJ.MachineHolder(self.build.environment.machines.build, self)
self.builtin['host_machine'] = \
OBJ.MachineHolder(self.build.environment.machines.host, self)
self.builtin['target_machine'] = \
OBJ.MachineHolder(self.build.environment.machines.target, self)
def build_func_dict(self) -> None:
self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
'add_global_link_arguments': self.func_add_global_link_arguments,
'add_languages': self.func_add_languages,
'add_project_arguments': self.func_add_project_arguments,
'add_project_link_arguments': self.func_add_project_link_arguments,
'add_test_setup': self.func_add_test_setup,
'alias_target': self.func_alias_target,
'assert': self.func_assert,
'benchmark': self.func_benchmark,
'both_libraries': self.func_both_lib,
'build_target': self.func_build_target,
'configuration_data': self.func_configuration_data,
'configure_file': self.func_configure_file,
'custom_target': self.func_custom_target,
'declare_dependency': self.func_declare_dependency,
'dependency': self.func_dependency,
'disabler': self.func_disabler,
'environment': self.func_environment,
'error': self.func_error,
'executable': self.func_executable,
'files': self.func_files,
'find_library': self.func_find_library,
'find_program': self.func_find_program,
'generator': self.func_generator,
'get_option': self.func_get_option,
'get_variable': self.func_get_variable,
'gettext': self.func_gettext,
'import': self.func_import,
'include_directories': self.func_include_directories,
'install_data': self.func_install_data,
'install_emptydir': self.func_install_emptydir,
'install_headers': self.func_install_headers,
'install_man': self.func_install_man,
'install_subdir': self.func_install_subdir,
'install_symlink': self.func_install_symlink,
'is_disabler': self.func_is_disabler,
'is_variable': self.func_is_variable,
'jar': self.func_jar,
'join_paths': self.func_join_paths,
'library': self.func_library,
'message': self.func_message,
'option': self.func_option,
'project': self.func_project,
'range': self.func_range,
'run_command': self.func_run_command,
'run_target': self.func_run_target,
'set_variable': self.func_set_variable,
'structured_sources': self.func_structured_sources,
'subdir': self.func_subdir,
'shared_library': self.func_shared_lib,
'shared_module': self.func_shared_module,
'static_library': self.func_static_lib,
'subdir_done': self.func_subdir_done,
'subproject': self.func_subproject,
'summary': self.func_summary,
'test': self.func_test,
'unset_variable': self.func_unset_variable,
'vcs_tag': self.func_vcs_tag,
'warning': self.func_warning,
})
if 'MESON_UNIT_TEST' in os.environ:
self.funcs.update({'exception': self.func_exception})
def build_holder_map(self) -> None:
'''
Build a mapping of `HoldableObject` types to their corresponding
`ObjectHolder`s. This mapping is used in `InterpreterBase` to automatically
holderify all returned values from methods and functions.
'''
self.holder_map.update({
# Primitives
list: P_OBJ.ArrayHolder,
dict: P_OBJ.DictHolder,
int: P_OBJ.IntegerHolder,
bool: P_OBJ.BooleanHolder,
str: P_OBJ.StringHolder,
P_OBJ.MesonVersionString: P_OBJ.MesonVersionStringHolder,
# Meson types
mesonlib.File: OBJ.FileHolder,
build.SharedLibrary: OBJ.SharedLibraryHolder,
build.StaticLibrary: OBJ.StaticLibraryHolder,
build.BothLibraries: OBJ.BothLibrariesHolder,
build.SharedModule: OBJ.SharedModuleHolder,
build.Executable: OBJ.ExecutableHolder,
build.Jar: OBJ.JarHolder,
build.CustomTarget: OBJ.CustomTargetHolder,
build.CustomTargetIndex: OBJ.CustomTargetIndexHolder,
build.Generator: OBJ.GeneratorHolder,
build.GeneratedList: OBJ.GeneratedListHolder,
build.ExtractedObjects: OBJ.GeneratedObjectsHolder,
build.RunTarget: OBJ.RunTargetHolder,
build.AliasTarget: OBJ.AliasTargetHolder,
build.Headers: OBJ.HeadersHolder,
build.Man: OBJ.ManHolder,
build.EmptyDir: OBJ.EmptyDirHolder,
build.Data: OBJ.DataHolder,
build.SymlinkData: OBJ.SymlinkDataHolder,
build.InstallDir: OBJ.InstallDirHolder,
build.IncludeDirs: OBJ.IncludeDirsHolder,
build.EnvironmentVariables: OBJ.EnvironmentVariablesHolder,
build.StructuredSources: OBJ.StructuredSourcesHolder,
compilers.RunResult: compilerOBJ.TryRunResultHolder,
dependencies.ExternalLibrary: OBJ.ExternalLibraryHolder,
coredata.UserFeatureOption: OBJ.FeatureOptionHolder,
envconfig.MachineInfo: OBJ.MachineHolder,
build.ConfigurationData: OBJ.ConfigurationDataHolder,
})
'''
Build a mapping of `HoldableObject` base classes to their
corresponding `ObjectHolder`s. The difference to `self.holder_map`
is that the keys here define an upper bound instead of requiring an
exact match.
The mappings defined here are only used when there was no direct hit
found in `self.holder_map`.
'''
self.bound_holder_map.update({
dependencies.Dependency: OBJ.DependencyHolder,
ExternalProgram: OBJ.ExternalProgramHolder,
compilers.Compiler: compilerOBJ.CompilerHolder,
ModuleObject: OBJ.ModuleObjectHolder,
MutableModuleObject: OBJ.MutableModuleObjectHolder,
})
def append_holder_map(self, held_type: T.Type[mesonlib.HoldableObject], holder_type: T.Type[ObjectHolder]) -> None:
'''
Adds one additional mapping to the `holder_map`.
The intended use for this function is in the `initialize` method of
modules to register custom object holders.
'''
self.holder_map.update({
held_type: holder_type
})
def process_new_values(self, invalues: T.List[T.Union[TYPE_var, ExecutableSerialisation]]) -> None:
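        # Register every object returned by a module or function with the build description
        # (targets, install scripts, data, tests, ...), recursing into nested lists.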
invalues = listify(invalues)
for v in invalues:
if isinstance(v, ObjectHolder):
raise InterpreterException('Modules must not return ObjectHolders')
if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
self.add_target(v.name, v)
elif isinstance(v, list):
self.process_new_values(v)
elif isinstance(v, ExecutableSerialisation):
v.subproject = self.subproject
self.build.install_scripts.append(v)
elif isinstance(v, build.Data):
self.build.data.append(v)
elif isinstance(v, build.SymlinkData):
self.build.symlinks.append(v)
elif isinstance(v, dependencies.InternalDependency):
# FIXME: This is special cased and not ideal:
# The first source is our new VapiTarget, the rest are deps
self.process_new_values(v.sources[0])
elif isinstance(v, build.InstallDir):
self.build.install_dirs.append(v)
elif isinstance(v, Test):
self.build.tests.append(v)
elif isinstance(v, (int, str, bool, Disabler, ObjectHolder, build.GeneratedList,
ExternalProgram, build.ConfigurationData)):
pass
else:
raise InterpreterException(f'Module returned a value of unknown type {v!r}.')
def get_build_def_files(self) -> mesonlib.OrderedSet[str]:
return self.build_def_files
def add_build_def_file(self, f: mesonlib.FileOrString) -> None:
# Use relative path for files within source directory, and absolute path
        # for system files. Skip files within the build directory. Also skip non-regular
        # files (e.g. /dev/stdout). Normalize the path to avoid duplicates; this
        # is especially important to convert '/' to '\' on Windows.
if isinstance(f, mesonlib.File):
if f.is_built:
return
f = os.path.normpath(f.relative_name())
elif os.path.isfile(f) and not f.startswith('/dev'):
srcdir = Path(self.environment.get_source_dir())
builddir = Path(self.environment.get_build_dir())
try:
f_ = Path(f).resolve()
except OSError:
f_ = Path(f)
s = f_.stat()
if (hasattr(s, 'st_file_attributes') and
s.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT != 0 and
s.st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK):
# This is a Windows Store link which we can't
# resolve, so just do our best otherwise.
f_ = f_.parent.resolve() / f_.name
else:
raise
if builddir in f_.parents:
return
if srcdir in f_.parents:
f_ = f_.relative_to(srcdir)
f = str(f_)
else:
return
if f not in self.build_def_files:
self.build_def_files.add(f)
def get_variables(self) -> T.Dict[str, InterpreterObject]:
return self.variables
def check_stdlibs(self) -> None:
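        # For every compiler language on each machine, look up an optional '<lang>_stdlib'
        # property from the machine files and resolve it as a forced-fallback dependency.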
machine_choices = [MachineChoice.HOST]
if self.coredata.is_cross_build():
machine_choices.append(MachineChoice.BUILD)
for for_machine in machine_choices:
props = self.build.environment.properties[for_machine]
for l in self.coredata.compilers[for_machine].keys():
try:
di = mesonlib.stringlistify(props.get_stdlib(l))
except KeyError:
continue
if len(di) == 1:
FeatureNew.single_use('stdlib without variable name', '0.56.0', self.subproject, location=self.current_node)
kwargs = {'native': for_machine is MachineChoice.BUILD,
}
name = l + '_stdlib'
df = DependencyFallbacksHolder(self, [name])
df.set_fallback(di)
dep = df.lookup(kwargs, force_fallback=True)
self.build.stdlibs[for_machine][l] = dep
def _import_module(self, modname: str, required: bool) -> NewExtensionModule:
if modname in self.modules:
return self.modules[modname]
try:
module = importlib.import_module('mesonbuild.modules.' + modname)
except ImportError:
if required:
raise InvalidArguments(f'Module "{modname}" does not exist')
ext_module = NotFoundExtensionModule()
else:
ext_module = module.initialize(self)
assert isinstance(ext_module, (ExtensionModule, NewExtensionModule))
self.build.modules.append(modname)
self.modules[modname] = ext_module
return ext_module
@typed_pos_args('import', str)
@typed_kwargs(
'import',
REQUIRED_KW.evolve(since='0.59.0'),
DISABLER_KW.evolve(since='0.59.0'),
)
@disablerIfNotFound
def func_import(self, node: mparser.BaseNode, args: T.Tuple[str],
kwargs: 'kwargs.FuncImportModule') -> T.Union[ExtensionModule, NewExtensionModule, NotFoundExtensionModule]:
modname = args[0]
disabled, required, _ = extract_required_kwarg(kwargs, self.subproject)
if disabled:
return NotFoundExtensionModule()
if modname.startswith('unstable-'):
plainname = modname.split('-', 1)[1]
try:
# check if stable module exists
mod = self._import_module(plainname, required)
# XXX: this is actually not helpful, since it doesn't do a version check
mlog.warning(f'Module {modname} is now stable, please use the {plainname} module instead.')
return mod
except InvalidArguments:
mlog.warning(f'Module {modname} has no backwards or forwards compatibility and might not exist in future releases.', location=node)
modname = 'unstable_' + plainname
return self._import_module(modname, required)
@typed_pos_args('files', varargs=str)
@noKwargs
def func_files(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> T.List[mesonlib.File]:
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args[0]]
# Used by declare_dependency() and pkgconfig.generate()
def extract_variables(self, kwargs, argname='variables', list_new=False, dict_new=False):
variables = kwargs.get(argname, {})
if isinstance(variables, dict):
if dict_new and variables:
FeatureNew.single_use(f'{argname} as dictionary', '0.56.0', self.subproject, location=self.current_node)
else:
varlist = mesonlib.stringlistify(variables)
if list_new:
FeatureNew.single_use(f'{argname} as list of strings', '0.56.0', self.subproject, location=self.current_node)
variables = collections.OrderedDict()
for v in varlist:
try:
(key, value) = v.split('=', 1)
except ValueError:
raise InterpreterException(f'Variable {v!r} must have a value separated by equals sign.')
variables[key.strip()] = value.strip()
for k, v in variables.items():
if not k or not v:
raise InterpreterException('Empty variable name or value')
if any(c.isspace() for c in k):
raise InterpreterException(f'Invalid whitespace in variable name "{k}"')
if not isinstance(v, str):
raise InterpreterException('variables values must be strings.')
return variables
@FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
@FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
@FeatureNewKwargs('declare_dependency', '0.62.0', ['d_module_versions', 'd_import_dirs'])
@permittedKwargs({'include_directories', 'link_with', 'sources', 'dependencies',
'compile_args', 'link_args', 'link_whole', 'version',
'variables', 'd_module_versions', 'd_import_dirs'})
@noPosargs
def func_declare_dependency(self, node, args, kwargs):
version = kwargs.get('version', self.project_version)
if not isinstance(version, str):
raise InterpreterException('Version must be a string.')
incs = self.extract_incdirs(kwargs)
libs = extract_as_list(kwargs, 'link_with')
libs_whole = extract_as_list(kwargs, 'link_whole')
sources = extract_as_list(kwargs, 'sources')
sources = listify(self.source_strings_to_files(sources))
deps = extract_as_list(kwargs, 'dependencies')
compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
variables = self.extract_variables(kwargs, list_new=True)
d_module_versions = extract_as_list(kwargs, 'd_module_versions')
d_import_dirs = self.extract_incdirs(kwargs, 'd_import_dirs')
final_deps = []
for d in deps:
if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
raise InterpreterException('Dependencies must be external deps')
final_deps.append(d)
for l in libs:
if isinstance(l, dependencies.Dependency):
raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
dep = dependencies.InternalDependency(version, incs, compile_args,
link_args, libs, libs_whole, sources, final_deps,
variables, d_module_versions, d_import_dirs)
return dep
@typed_pos_args('assert', bool, optargs=[str])
@noKwargs
def func_assert(self, node: mparser.FunctionNode, args: T.Tuple[bool, T.Optional[str]],
kwargs: 'TYPE_kwargs') -> None:
value, message = args
if message is None:
FeatureNew.single_use('assert function without message argument', '0.53.0', self.subproject, location=node)
if not value:
if message is None:
from ..ast import AstPrinter
printer = AstPrinter()
node.args.arguments[0].accept(printer)
message = printer.result
raise InterpreterException('Assert failed: ' + message)
def validate_arguments(self, args, argcount, arg_types):
if argcount is not None:
if argcount != len(args):
raise InvalidArguments(f'Expected {argcount} arguments, got {len(args)}.')
for actual, wanted in zip(args, arg_types):
if wanted is not None:
if not isinstance(actual, wanted):
raise InvalidArguments('Incorrect argument type.')
    # Executables aren't actually accepted, but we allow them here so that we
    # can produce better error messages when an overridden program is used.
@typed_pos_args(
'run_command',
(build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str),
varargs=(build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str))
@typed_kwargs(
'run_command',
KwargInfo('check', (bool, NoneType), since='0.47.0'),
KwargInfo('capture', bool, default=True, since='0.47.0'),
ENV_KW.evolve(since='0.50.0'),
)
def func_run_command(self, node: mparser.BaseNode,
args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
kwargs: 'kwargs.RunCommand') -> RunProcess:
return self.run_command_impl(node, args, kwargs)
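    # Shared implementation for run_command(): the first positional argument is
    # resolved to an executable program, the remaining arguments are expanded to
    # plain strings, and the command is executed at configure time as a RunProcess.
    # Any file used as an argument is registered so that changing it triggers a
    # reconfigure (see the add_build_def_file() calls below).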
def run_command_impl(self,
node: mparser.BaseNode,
args: T.Tuple[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str],
T.List[T.Union[build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str]]],
kwargs: 'kwargs.RunCommand',
in_builddir: bool = False) -> RunProcess:
cmd, cargs = args
capture = kwargs['capture']
env = kwargs['env']
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
check = kwargs['check']
if check is None:
mlog.warning(implicit_check_false_warning, once=True)
check = False
overridden_msg = ('Program {!r} was overridden with the compiled '
'executable {!r} and therefore cannot be used during '
'configuration')
expanded_args: T.List[str] = []
if isinstance(cmd, build.Executable):
progname = node.args.arguments[0].value
raise InterpreterException(overridden_msg.format(progname, cmd.description()))
if isinstance(cmd, ExternalProgram):
if not cmd.found():
raise InterpreterException(f'command {cmd.get_name()!r} not found or not executable')
elif isinstance(cmd, compilers.Compiler):
exelist = cmd.get_exelist()
cmd = exelist[0]
prog = ExternalProgram(cmd, silent=True)
if not prog.found():
raise InterpreterException(f'Program {cmd!r} not found or not executable')
cmd = prog
expanded_args = exelist[1:]
else:
if isinstance(cmd, mesonlib.File):
cmd = cmd.absolute_path(srcdir, builddir)
# Prefer scripts in the current source directory
search_dir = os.path.join(srcdir, self.subdir)
prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
if not prog.found():
raise InterpreterException(f'Program or command {cmd!r} not found or not executable')
cmd = prog
for a in cargs:
if isinstance(a, str):
expanded_args.append(a)
elif isinstance(a, mesonlib.File):
expanded_args.append(a.absolute_path(srcdir, builddir))
elif isinstance(a, ExternalProgram):
expanded_args.append(a.get_path())
elif isinstance(a, compilers.Compiler):
FeatureNew.single_use('Compiler object as a variadic argument to `run_command`', '0.61.0', self.subproject, location=node)
prog = ExternalProgram(a.exelist[0], silent=True)
if not prog.found():
raise InterpreterException(f'Program {cmd!r} not found or not executable')
expanded_args.append(prog.get_path())
else:
raise InterpreterException(overridden_msg.format(a.name, cmd.description()))
# If any file that was used as an argument to the command
# changes, we must re-run the configuration step.
self.add_build_def_file(cmd.get_path())
for a in expanded_args:
if not os.path.isabs(a):
a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
self.add_build_def_file(a)
return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
self.environment.get_build_command() + ['introspect'],
in_builddir=in_builddir, check=check, capture=capture)
def func_gettext(self, nodes, args, kwargs):
raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')
def func_option(self, nodes, args, kwargs):
raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
@typed_pos_args('subproject', str)
@typed_kwargs(
'subproject',
REQUIRED_KW,
DEFAULT_OPTIONS.evolve(since='0.38.0'),
KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True),
)
def func_subproject(self, nodes: mparser.BaseNode, args: T.Tuple[str], kwargs_: kwargs.Subproject) -> SubprojectHolder:
kw: kwargs.DoSubproject = {
'required': kwargs_['required'],
'default_options': kwargs_['default_options'],
'version': kwargs_['version'],
'options': None,
'cmake_options': [],
}
return self.do_subproject(args[0], 'meson', kw)
def disabled_subproject(self, subp_name: str, disabled_feature: T.Optional[str] = None,
exception: T.Optional[Exception] = None) -> SubprojectHolder:
sub = SubprojectHolder(NullSubprojectInterpreter(), os.path.join(self.subproject_dir, subp_name),
disabled_feature=disabled_feature, exception=exception)
self.subprojects[subp_name] = sub
self.coredata.initialized_subprojects.add(subp_name)
return sub
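    # Common entry point for subproject() and the find_program()/dependency
    # fallback machinery: it validates the subproject name, resolves it through
    # the wrap resolver and then dispatches to the meson or cmake specific
    # configuration below. Illustrative meson-DSL usage, not from this file:
    #   sub = subproject('mylib', default_options: ['warning_level=1'], required: false)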
def do_subproject(self, subp_name: str, method: Literal['meson', 'cmake'], kwargs: kwargs.DoSubproject) -> SubprojectHolder:
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Subproject', mlog.bold(subp_name), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
return self.disabled_subproject(subp_name, disabled_feature=feature)
default_options = coredata.create_options_dict(kwargs['default_options'], subp_name)
if subp_name == '':
raise InterpreterException('Subproject name must not be empty.')
if subp_name[0] == '.':
raise InterpreterException('Subproject name must not start with a period.')
if '..' in subp_name:
raise InterpreterException('Subproject name must not contain a ".." path segment.')
if os.path.isabs(subp_name):
raise InterpreterException('Subproject name must not be an absolute path.')
if has_path_sep(subp_name):
mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
location=self.current_node)
if subp_name in self.subproject_stack:
fullstack = self.subproject_stack + [subp_name]
incpath = ' => '.join(fullstack)
raise InvalidCode(f'Recursive include of subprojects: {incpath}.')
if subp_name in self.subprojects:
subproject = self.subprojects[subp_name]
if required and not subproject.found():
raise InterpreterException(f'Subproject "{subproject.subdir}" required but not found.')
if kwargs['version']:
pv = self.build.subprojects[subp_name]
wanted = kwargs['version']
if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
return subproject
r = self.environment.wrap_resolver
try:
subdir = r.resolve(subp_name, method)
except wrap.WrapException as e:
if not required:
mlog.log(e)
mlog.log('Subproject ', mlog.bold(subp_name), 'is buildable:', mlog.red('NO'), '(disabling)')
return self.disabled_subproject(subp_name, exception=e)
raise e
subdir_abs = os.path.join(self.environment.get_source_dir(), subdir)
os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
self.global_args_frozen = True
stack = ':'.join(self.subproject_stack + [subp_name])
m = ['\nExecuting subproject', mlog.bold(stack)]
if method != 'meson':
m += ['method', mlog.bold(method)]
mlog.log(*m, '\n', nested=False)
try:
if method == 'meson':
return self._do_subproject_meson(subp_name, subdir, default_options, kwargs)
elif method == 'cmake':
return self._do_subproject_cmake(subp_name, subdir, subdir_abs, default_options, kwargs)
else:
raise mesonlib.MesonBugException(f'The method {method} is invalid for the subproject {subp_name}')
# Invalid code is always an error
except InvalidCode:
raise
except Exception as e:
if not required:
with mlog.nested(subp_name):
                    # Suppress the 'ERROR:' prefix because this exception is not
                    # fatal and VS CI treats any logs with "ERROR:" as fatal.
mlog.exception(e, prefix=mlog.yellow('Exception:'))
mlog.log('\nSubproject', mlog.bold(subdir), 'is buildable:', mlog.red('NO'), '(disabling)')
return self.disabled_subproject(subp_name, exception=e)
raise e
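    # Configures a meson subproject by running a nested Interpreter. Note that
    # the subprojects/modules/holder maps are shared with the parent interpreter,
    # so even a failing subproject can leave entries behind (see the comment below).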
def _do_subproject_meson(self, subp_name: str, subdir: str,
default_options: T.Dict[OptionKey, str],
kwargs: kwargs.DoSubproject,
ast: T.Optional[mparser.CodeBlockNode] = None,
build_def_files: T.Optional[T.List[str]] = None,
is_translated: bool = False) -> SubprojectHolder:
with mlog.nested(subp_name):
new_build = self.build.copy()
subi = Interpreter(new_build, self.backend, subp_name, subdir, self.subproject_dir,
default_options, ast=ast, is_translated=is_translated,
user_defined_options=self.user_defined_options)
# Those lists are shared by all interpreters. That means that
# even if the subproject fails, any modification that the subproject
# made to those lists will affect the parent project.
subi.subprojects = self.subprojects
subi.modules = self.modules
subi.holder_map = self.holder_map
subi.bound_holder_map = self.bound_holder_map
subi.summary = self.summary
subi.subproject_stack = self.subproject_stack + [subp_name]
current_active = self.active_projectname
current_warnings_counter = mlog.log_warnings_counter
mlog.log_warnings_counter = 0
subi.run()
subi_warnings = mlog.log_warnings_counter
mlog.log_warnings_counter = current_warnings_counter
mlog.log('Subproject', mlog.bold(subp_name), 'finished.')
mlog.log()
if kwargs['version']:
pv = subi.project_version
wanted = kwargs['version']
if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
raise InterpreterException(f'Subproject {subp_name} version is {pv} but {wanted} required.')
self.active_projectname = current_active
self.subprojects.update(subi.subprojects)
self.subprojects[subp_name] = SubprojectHolder(subi, subdir, warnings=subi_warnings)
# Duplicates are possible when subproject uses files from project root
if build_def_files:
self.build_def_files.update(build_def_files)
            # We always need the subi.build_def_files, to propagate sub-sub-projects
self.build_def_files.update(subi.build_def_files)
self.build.merge(subi.build)
self.build.subprojects[subp_name] = subi.project_version
self.coredata.initialized_subprojects.add(subp_name)
return self.subprojects[subp_name]
def _do_subproject_cmake(self, subp_name: str, subdir: str, subdir_abs: str,
default_options: T.Dict[OptionKey, str],
kwargs: kwargs.DoSubproject) -> SubprojectHolder:
with mlog.nested(subp_name):
new_build = self.build.copy()
prefix = self.coredata.options[OptionKey('prefix')].value
from ..modules.cmake import CMakeSubprojectOptions
options = kwargs['options'] or CMakeSubprojectOptions()
cmake_options = kwargs['cmake_options'] + options.cmake_options
cm_int = CMakeInterpreter(new_build, Path(subdir), Path(subdir_abs), Path(prefix), new_build.environment, self.backend)
cm_int.initialise(cmake_options)
cm_int.analyse()
# Generate a meson ast and execute it with the normal do_subproject_meson
ast = cm_int.pretend_to_be_meson(options.target_options)
mlog.log()
with mlog.nested('cmake-ast'):
mlog.log('Processing generated meson AST')
# Debug print the generated meson file
from ..ast import AstIndentationGenerator, AstPrinter
printer = AstPrinter()
ast.accept(AstIndentationGenerator())
ast.accept(printer)
printer.post_process()
meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
with open(meson_filename, "w", encoding='utf-8') as f:
f.write(printer.result)
mlog.log('Build file:', meson_filename)
mlog.cmd_ci_include(meson_filename)
mlog.log()
result = self._do_subproject_meson(subp_name, subdir, default_options, kwargs, ast, [str(f) for f in cm_int.bs_files], is_translated=True)
result.cm_interpreter = cm_int
mlog.log()
return result
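    # Looks up an option value, taking per-subproject options and 'yielding'
    # into account: a yielding subproject option defers to the parent project's
    # option of the same name, unless their types differ (which only warns).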
def get_option_internal(self, optname: str) -> coredata.UserOption:
key = OptionKey.from_string(optname).evolve(subproject=self.subproject)
if not key.is_project():
for opts in [self.coredata.options, compilers.base_options]:
v = opts.get(key)
if v is None or v.yielding:
v = opts.get(key.as_root())
if v is not None:
assert isinstance(v, coredata.UserOption), 'for mypy'
return v
try:
opt = self.coredata.options[key]
if opt.yielding and key.subproject and key.as_root() in self.coredata.options:
popt = self.coredata.options[key.as_root()]
if type(opt) is type(popt):
opt = popt
else:
# Get class name, then option type as a string
opt_type = opt.__class__.__name__[4:][:-6].lower()
popt_type = popt.__class__.__name__[4:][:-6].lower()
# This is not a hard error to avoid dependency hell, the workaround
# when this happens is to simply set the subproject's option directly.
mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
'to parent option of type {3!r}, ignoring parent value. '
'Use -D{2}:{0}=value to set the value for this option manually'
'.'.format(optname, opt_type, self.subproject, popt_type),
location=self.current_node)
return opt
except KeyError:
pass
raise InterpreterException(f'Tried to access unknown option {optname!r}.')
@typed_pos_args('get_option', str)
@noKwargs
def func_get_option(self, nodes: mparser.BaseNode, args: T.Tuple[str],
kwargs: 'TYPE_kwargs') -> T.Union[coredata.UserOption, 'TYPE_var']:
optname = args[0]
if ':' in optname:
raise InterpreterException('Having a colon in option name is forbidden, '
'projects are not allowed to directly access '
'options of other subprojects.')
opt = self.get_option_internal(optname)
if isinstance(opt, coredata.UserFeatureOption):
opt.name = optname
return opt
elif isinstance(opt, coredata.UserOption):
return opt.value
return opt
@typed_pos_args('configuration_data', optargs=[dict])
@noKwargs
def func_configuration_data(self, node: mparser.BaseNode, args: T.Tuple[T.Optional[T.Dict[str, T.Any]]],
kwargs: 'TYPE_kwargs') -> build.ConfigurationData:
initial_values = args[0]
if initial_values is not None:
FeatureNew.single_use('configuration_data dictionary', '0.49.0', self.subproject, location=node)
for k, v in initial_values.items():
if not isinstance(v, (str, int, bool)):
                    raise InvalidArguments(
                        f'"configuration_data": initial value dictionary key "{k!r}" must be "str | int | bool", not "{v!r}"')
return build.ConfigurationData(initial_values)
def set_backend(self) -> None:
# The backend is already set when parsing subprojects
if self.backend is not None:
return
backend = self.coredata.get_option(OptionKey('backend'))
from ..backend import backends
self.backend = backends.get_backend_from_name(backend, self.build, self)
if self.backend is None:
raise InterpreterException(f'Unknown backend "{backend}".')
if backend != self.backend.name:
if self.backend.name.startswith('vs'):
mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
self.coredata.set_option(OptionKey('backend'), self.backend.name)
# Only init backend options on first invocation otherwise it would
# override values previously set from command line.
if self.environment.first_invocation:
self.coredata.init_backend_options(backend)
options = {k: v for k, v in self.environment.options.items() if k.is_backend()}
self.coredata.set_options(options)
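    # project() is the first function call in every meson.build. A minimal
    # illustrative call (meson DSL, not part of this file):
    #   project('demo', 'c', version: '0.1', default_options: ['warning_level=2'])
    # It records the project name/version, loads the option file and wrap files,
    # initializes per-(sub)project builtins and adds the requested languages.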
@typed_pos_args('project', str, varargs=str)
@typed_kwargs(
'project',
DEFAULT_OPTIONS,
KwargInfo('meson_version', (str, NoneType)),
KwargInfo(
'version',
(str, mesonlib.File, NoneType, list),
default='undefined',
validator=_project_version_validator,
convertor=lambda x: x[0] if isinstance(x, list) else x,
),
KwargInfo('license', ContainerTypeInfo(list, str), default=['unknown'], listify=True),
KwargInfo('subproject_dir', str, default='subprojects'),
)
def func_project(self, node: mparser.FunctionNode, args: T.Tuple[str, T.List[str]], kwargs: 'kwargs.Project') -> None:
proj_name, proj_langs = args
if ':' in proj_name:
raise InvalidArguments(f"Project name {proj_name!r} must not contain ':'")
# This needs to be evaluated as early as possible, as meson uses this
# for things like deprecation testing.
if kwargs['meson_version']:
cv = coredata.version
pv = kwargs['meson_version']
if not mesonlib.version_compare(cv, pv):
raise InterpreterException(f'Meson version is {cv} but project requires {pv}')
mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']
if os.path.exists(self.option_file):
oi = optinterpreter.OptionInterpreter(self.subproject)
oi.process(self.option_file)
self.coredata.update_project_options(oi.options)
self.add_build_def_file(self.option_file)
# Do not set default_options on reconfigure otherwise it would override
# values previously set from command line. That means that changing
# default_options in a project will trigger a reconfigure but won't
# have any effect.
self.project_default_options = coredata.create_options_dict(
kwargs['default_options'], self.subproject)
# If this is the first invocation we always need to initialize
# builtins, if this is a subproject that is new in a re-invocation we
# need to initialize builtins for that
if self.environment.first_invocation or (self.subproject != '' and self.subproject not in self.coredata.initialized_subprojects):
default_options = self.project_default_options.copy()
default_options.update(self.default_project_options)
self.coredata.init_builtins(self.subproject)
else:
default_options = {}
self.coredata.set_default_options(default_options, self.subproject, self.environment)
if not self.is_subproject():
self.build.project_name = proj_name
self.active_projectname = proj_name
version = kwargs['version']
if isinstance(version, mesonlib.File):
FeatureNew.single_use('version from file', '0.57.0', self.subproject, location=node)
self.add_build_def_file(version)
ifname = version.absolute_path(self.environment.source_dir,
self.environment.build_dir)
try:
ver_data = Path(ifname).read_text(encoding='utf-8').split('\n')
except FileNotFoundError:
raise InterpreterException('Version file not found.')
if len(ver_data) == 2 and ver_data[1] == '':
ver_data = ver_data[0:1]
if len(ver_data) != 1:
raise InterpreterException('Version file must contain exactly one line of text.')
self.project_version = ver_data[0]
else:
self.project_version = version
if self.build.project_version is None:
self.build.project_version = self.project_version
proj_license = kwargs['license']
self.build.dep_manifest[proj_name] = build.DepManifest(self.project_version, proj_license)
if self.subproject in self.build.projects:
raise InvalidCode('Second call to project().')
# spdirname is the subproject_dir for this project, relative to self.subdir.
# self.subproject_dir is the subproject_dir for the main project, relative to top source dir.
spdirname = kwargs['subproject_dir']
if not isinstance(spdirname, str):
raise InterpreterException('Subproject_dir must be a string')
if os.path.isabs(spdirname):
raise InterpreterException('Subproject_dir must not be an absolute path.')
if spdirname.startswith('.'):
raise InterpreterException('Subproject_dir must not begin with a period.')
if '..' in spdirname:
raise InterpreterException('Subproject_dir must not contain a ".." segment.')
if not self.is_subproject():
self.subproject_dir = spdirname
self.build.subproject_dir = self.subproject_dir
# Load wrap files from this (sub)project.
wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
if not self.is_subproject() or wrap_mode != WrapMode.nopromote:
subdir = os.path.join(self.subdir, spdirname)
r = wrap.Resolver(self.environment.get_source_dir(), subdir, self.subproject, wrap_mode)
if self.is_subproject():
self.environment.wrap_resolver.merge_wraps(r)
else:
self.environment.wrap_resolver = r
self.build.projects[self.subproject] = proj_name
mlog.log('Project name:', mlog.bold(proj_name))
mlog.log('Project version:', mlog.bold(self.project_version))
if not self.is_subproject():
# We have to activate VS before adding languages and before calling
# self.set_backend() otherwise it wouldn't be able to detect which
# vs backend version we need. But after setting default_options in case
# the project sets vs backend by default.
backend = self.coredata.get_option(OptionKey('backend'))
force_vsenv = self.user_defined_options.vsenv or backend.startswith('vs')
if mesonlib.setup_vsenv(force_vsenv):
self.build.need_vsenv = True
self.add_languages(proj_langs, True, MachineChoice.HOST)
self.add_languages(proj_langs, False, MachineChoice.BUILD)
self.set_backend()
if not self.is_subproject():
self.check_stdlibs()
@typed_kwargs('add_languages', KwargInfo('native', (bool, NoneType), since='0.54.0'), REQUIRED_KW)
@typed_pos_args('add_languages', varargs=str)
def func_add_languages(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddLanguages') -> bool:
langs = args[0]
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
native = kwargs['native']
if disabled:
for lang in sorted(langs, key=compilers.sort_clink):
mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
if native is not None:
return self.add_languages(langs, required, self.machine_from_native_kwarg(kwargs))
else:
# absent 'native' means 'both' for backwards compatibility
tv = FeatureNew.get_target_version(self.subproject)
if FeatureNew.check_version(tv, '0.54.0'):
mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
location=node)
success = self.add_languages(langs, False, MachineChoice.BUILD)
success &= self.add_languages(langs, required, MachineChoice.HOST)
return success
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
if len(args) > 1:
FeatureNew.single_use('message with more than one argument', '0.54.0', self.subproject, location=node)
args_str = [stringifyUserArguments(i) for i in args]
self.message_impl(args_str)
def message_impl(self, args):
mlog.log(mlog.bold('Message:'), *args)
@noArgsFlattening
@FeatureNew('summary', '0.53.0')
@typed_pos_args('summary', (str, dict), optargs=[object])
@typed_kwargs(
'summary',
KwargInfo('section', str, default=''),
KwargInfo('bool_yn', bool, default=False),
KwargInfo('list_sep', (str, NoneType), since='0.54.0')
)
def func_summary(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, T.Dict[str, T.Any]], T.Optional[T.Any]],
kwargs: 'kwargs.Summary') -> None:
if args[1] is None:
if not isinstance(args[0], dict):
raise InterpreterException('Summary first argument must be dictionary.')
values = args[0]
else:
if not isinstance(args[0], str):
raise InterpreterException('Summary first argument must be string.')
values = {args[0]: args[1]}
self.summary_impl(kwargs['section'], values, kwargs)
def summary_impl(self, section: str, values, kwargs: 'kwargs.Summary') -> None:
if self.subproject not in self.summary:
self.summary[self.subproject] = Summary(self.active_projectname, self.project_version)
self.summary[self.subproject].add_section(
section, values, kwargs['bool_yn'], kwargs['list_sep'], self.subproject)
def _print_summary(self) -> None:
        # Add automatic 'Subprojects' section in main project.
all_subprojects = collections.OrderedDict()
for name, subp in sorted(self.subprojects.items()):
value = subp.found()
if subp.disabled_feature:
value = [value, f'Feature {subp.disabled_feature!r} disabled']
elif subp.exception:
value = [value, str(subp.exception)]
elif subp.warnings > 0:
value = [value, f'{subp.warnings} warnings']
all_subprojects[name] = value
if all_subprojects:
self.summary_impl('Subprojects', all_subprojects,
{'bool_yn': True,
'list_sep': ' ',
})
# Add automatic section with all user defined options
if self.user_defined_options:
values = collections.OrderedDict()
if self.user_defined_options.cross_file:
values['Cross files'] = self.user_defined_options.cross_file
if self.user_defined_options.native_file:
values['Native files'] = self.user_defined_options.native_file
sorted_options = sorted(self.user_defined_options.cmd_line_options.items())
values.update({str(k): v for k, v in sorted_options})
if values:
self.summary_impl('User defined options', values, {'bool_yn': False, 'list_sep': None})
# Print all summaries, main project last.
mlog.log('') # newline
main_summary = self.summary.pop('', None)
for subp_name, summary in sorted(self.summary.items()):
if self.subprojects[subp_name].found():
summary.dump()
if main_summary:
main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
if len(args) > 1:
FeatureNew.single_use('warning with more than one argument', '0.54.0', self.subproject, location=node)
args_str = [stringifyUserArguments(i) for i in args]
mlog.warning(*args_str, location=node)
@noArgsFlattening
@noKwargs
def func_error(self, node, args, kwargs):
if len(args) > 1:
FeatureNew.single_use('error with more than one argument', '0.58.0', self.subproject, location=node)
args_str = [stringifyUserArguments(i) for i in args]
raise InterpreterException('Problem encountered: ' + ' '.join(args_str))
@noKwargs
@noPosargs
def func_exception(self, node, args, kwargs):
raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
success = self.add_languages_for(args, required, for_machine)
if not self.coredata.is_cross_build():
self.coredata.copy_build_options_from_regular_ones()
self._redetect_machines()
return success
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
should = self.environment.properties.host.get('skip_sanity_check', False)
if not isinstance(should, bool):
raise InterpreterException('Option skip_sanity_check must be a boolean.')
if for_machine != MachineChoice.HOST and not should:
return False
if not self.environment.is_cross_build() and not should:
return False
return should
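    # Detects (or reuses) a compiler for each requested language on the given
    # machine, runs its sanity check unless the cross file disables it, and
    # registers the static linker. Returns False if a non-required language
    # could not be found.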
def add_languages_for(self, args: T.List[str], required: bool, for_machine: MachineChoice) -> bool:
args = [a.lower() for a in args]
langs = set(self.coredata.compilers[for_machine].keys())
langs.update(args)
# We'd really like to add cython's default language here, but it can't
# actually be done because the cython compiler hasn't been initialized,
# so we can't actually get the option yet. Because we can't know what
# compiler to add by default, and we don't want to add unnecessary
        # compilers, we don't add anything for cython here, and instead do it
        # when the first cython target using a particular language is used.
if 'vala' in langs and 'c' not in langs:
FeatureNew.single_use('Adding Vala language without C', '0.59.0', self.subproject, location=self.current_node)
args.append('c')
success = True
for lang in sorted(args, key=compilers.sort_clink):
clist = self.coredata.compilers[for_machine]
machine_name = for_machine.get_lower_case_name()
if lang in clist:
comp = clist[lang]
else:
try:
comp = compilers.detect_compiler_for(self.environment, lang, for_machine)
if comp is None:
raise InvalidArguments(f'Tried to use unknown language "{lang}".')
if self.should_skip_sanity_check(for_machine):
mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
else:
comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
except Exception:
if not required:
mlog.log('Compiler for language',
mlog.bold(lang), 'for the', machine_name,
'machine not found.')
success = False
continue
else:
raise
if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
logger_fun = mlog.log
else:
logger_fun = mlog.debug
logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
if comp.linker is not None:
logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
self.build.ensure_static_linker(comp)
return success
def program_from_file_for(self, for_machine: MachineChoice, prognames: T.List[mesonlib.FileOrString]
) -> T.Optional[ExternalProgram]:
for p in prognames:
if isinstance(p, mesonlib.File):
continue # Always points to a local (i.e. self generated) file.
if not isinstance(p, str):
raise InterpreterException('Executable name must be a string')
prog = ExternalProgram.from_bin_list(self.environment, for_machine, p)
if prog.found():
return prog
return None
def program_from_system(self, args: T.List[mesonlib.FileOrString], search_dirs: T.List[str],
extra_info: T.List[mlog.TV_Loggable]) -> T.Optional[ExternalProgram]:
# Search for scripts relative to current subdir.
# Do not cache found programs because find_program('foobar')
# might give different results when run from different source dirs.
source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
for exename in args:
if isinstance(exename, mesonlib.File):
if exename.is_built:
search_dir = os.path.join(self.environment.get_build_dir(),
exename.subdir)
else:
search_dir = os.path.join(self.environment.get_source_dir(),
exename.subdir)
exename = exename.fname
extra_search_dirs = []
elif isinstance(exename, str):
search_dir = source_dir
extra_search_dirs = search_dirs
else:
raise InvalidArguments(f'find_program only accepts strings and files, not {exename!r}')
extprog = ExternalProgram(exename, search_dir=search_dir,
extra_search_dirs=extra_search_dirs,
silent=True)
if extprog.found():
extra_info.append(f"({' '.join(extprog.get_command())})")
return extprog
return None
def program_from_overrides(self, command_names: T.List[mesonlib.FileOrString],
extra_info: T.List['mlog.TV_Loggable']
) -> T.Optional[T.Union[ExternalProgram, OverrideProgram, build.Executable]]:
for name in command_names:
if not isinstance(name, str):
continue
if name in self.build.find_overrides:
exe = self.build.find_overrides[name]
extra_info.append(mlog.blue('(overridden)'))
return exe
return None
def store_name_lookups(self, command_names: T.List[mesonlib.FileOrString]) -> None:
for name in command_names:
if isinstance(name, str):
self.build.searched_programs.add(name)
def add_find_program_override(self, name: str, exe: T.Union[build.Executable, ExternalProgram, 'OverrideProgram']) -> None:
if name in self.build.searched_programs:
raise InterpreterException(f'Tried to override finding of executable "{name}" which has already been found.')
if name in self.build.find_overrides:
raise InterpreterException(f'Tried to override executable "{name}" which has already been overridden.')
self.build.find_overrides[name] = exe
def notfound_program(self, args: T.List[mesonlib.FileOrString]) -> ExternalProgram:
return NonExistingExternalProgram(' '.join(
[a if isinstance(a, str) else a.absolute_path(self.environment.source_dir, self.environment.build_dir)
for a in args]))
    # TODO update modules to always pass `for_machine`. It is bad form to assume
    # the host machine.
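    # Core find_program() logic: performs the lookup via program_lookup(), raises
    # when a required program is missing, and, when a 'version' requirement was
    # given, compares it against version_func's result, the owning subproject's
    # version for built executables, or the program's reported version otherwise.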
def find_program_impl(self, args: T.List[mesonlib.FileOrString],
for_machine: MachineChoice = MachineChoice.HOST,
required: bool = True, silent: bool = True,
wanted: T.Union[str, T.List[str]] = '',
search_dirs: T.Optional[T.List[str]] = None,
version_func: T.Optional[T.Callable[[T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']], str]] = None
) -> T.Union['ExternalProgram', 'build.Executable', 'OverrideProgram']:
args = mesonlib.listify(args)
extra_info: T.List[mlog.TV_Loggable] = []
progobj = self.program_lookup(args, for_machine, required, search_dirs, extra_info)
if progobj is None:
progobj = self.notfound_program(args)
if isinstance(progobj, ExternalProgram) and not progobj.found():
if not silent:
mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'))
if required:
m = 'Program {!r} not found or not executable'
raise InterpreterException(m.format(progobj.get_name()))
return progobj
if wanted:
if version_func:
version = version_func(progobj)
elif isinstance(progobj, build.Executable):
if progobj.subproject:
interp = self.subprojects[progobj.subproject].held_object
else:
interp = self
assert isinstance(interp, Interpreter)
version = interp.project_version
else:
version = progobj.get_version(self)
is_found, not_found, _ = mesonlib.version_compare_many(version, wanted)
if not is_found:
mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.red('NO'),
'found', mlog.normal_cyan(version), 'but need:',
mlog.bold(', '.join([f"'{e}'" for e in not_found])), *extra_info)
if required:
m = 'Invalid version of program, need {!r} {!r} found {!r}.'
raise InterpreterException(m.format(progobj.name, not_found, version))
return self.notfound_program(args)
extra_info.insert(0, mlog.normal_cyan(version))
# Only store successful lookups
self.store_name_lookups(args)
if not silent:
mlog.log('Program', mlog.bold(progobj.name), 'found:', mlog.green('YES'), *extra_info)
if isinstance(progobj, build.Executable):
progobj.was_returned_by_find_program = True
return progobj
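    # Lookup order used by find_program(): explicit overrides, forced wrap
    # fallback, binaries listed in the machine files, the system search
    # (PATH / extra dirs), a python3 special case, and finally the wrap
    # fallback subproject for required programs.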
def program_lookup(self, args: T.List[mesonlib.FileOrString], for_machine: MachineChoice,
required: bool, search_dirs: T.List[str], extra_info: T.List[mlog.TV_Loggable]
) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
progobj = self.program_from_overrides(args, extra_info)
if progobj:
return progobj
fallback = None
wrap_mode = self.coredata.get_option(OptionKey('wrap_mode'))
if wrap_mode != WrapMode.nofallback and self.environment.wrap_resolver:
fallback = self.environment.wrap_resolver.find_program_provider(args)
if fallback and wrap_mode == WrapMode.forcefallback:
return self.find_program_fallback(fallback, args, required, extra_info)
progobj = self.program_from_file_for(for_machine, args)
if progobj is None:
progobj = self.program_from_system(args, search_dirs, extra_info)
if progobj is None and args[0].endswith('python3'):
prog = ExternalProgram('python3', mesonlib.python_command, silent=True)
progobj = prog if prog.found() else None
if progobj is None and fallback and required:
progobj = self.find_program_fallback(fallback, args, required, extra_info)
return progobj
def find_program_fallback(self, fallback: str, args: T.List[mesonlib.FileOrString],
required: bool, extra_info: T.List[mlog.TV_Loggable]
) -> T.Optional[T.Union[ExternalProgram, build.Executable, OverrideProgram]]:
mlog.log('Fallback to subproject', mlog.bold(fallback), 'which provides program',
mlog.bold(' '.join(args)))
sp_kwargs: kwargs.DoSubproject = {
'required': required,
'default_options': [],
'version': [],
'cmake_options': [],
'options': None,
}
self.do_subproject(fallback, 'meson', sp_kwargs)
return self.program_from_overrides(args, extra_info)
@typed_pos_args('find_program', varargs=(str, mesonlib.File), min_varargs=1)
@typed_kwargs(
'find_program',
DISABLER_KW.evolve(since='0.49.0'),
NATIVE_KW,
REQUIRED_KW,
KwargInfo('dirs', ContainerTypeInfo(list, str), default=[], listify=True, since='0.53.0'),
KwargInfo('version', ContainerTypeInfo(list, str), default=[], listify=True, since='0.52.0'),
)
@disablerIfNotFound
def func_find_program(self, node: mparser.BaseNode, args: T.Tuple[T.List[mesonlib.FileOrString]],
kwargs: 'kwargs.FindProgram',
) -> T.Union['build.Executable', ExternalProgram, 'OverrideProgram']:
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Program', mlog.bold(' '.join(args[0])), 'skipped: feature', mlog.bold(feature), 'disabled')
return self.notfound_program(args[0])
search_dirs = extract_search_dirs(kwargs)
return self.find_program_impl(args[0], kwargs['native'], required=required,
silent=False, wanted=kwargs['version'],
search_dirs=search_dirs)
def func_find_library(self, node, args, kwargs):
raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n'
)
# When adding kwargs, please check if they make sense in dependencies.get_dep_identifier()
@FeatureNewKwargs('dependency', '0.57.0', ['cmake_package_version'])
@FeatureNewKwargs('dependency', '0.56.0', ['allow_fallback'])
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_dependency_kwargs)
@typed_pos_args('dependency', varargs=str, min_varargs=1)
def func_dependency(self, node, args, kwargs):
# Replace '' by empty list of names
names = [n for n in args[0] if n]
if len(names) > 1:
FeatureNew('dependency with more than one name', '0.60.0').use(self.subproject)
allow_fallback = kwargs.get('allow_fallback')
if allow_fallback is not None and not isinstance(allow_fallback, bool):
raise InvalidArguments('"allow_fallback" argument must be boolean')
fallback = kwargs.get('fallback')
default_options = kwargs.get('default_options')
df = DependencyFallbacksHolder(self, names, allow_fallback, default_options)
df.set_fallback(fallback)
not_found_message = kwargs.get('not_found_message', '')
if not isinstance(not_found_message, str):
raise InvalidArguments('The not_found_message must be a string.')
try:
d = df.lookup(kwargs)
except Exception:
if not_found_message:
self.message_impl([not_found_message])
raise
assert isinstance(d, Dependency)
if not d.found() and not_found_message:
self.message_impl([not_found_message])
# Ensure the correct include type
if 'include_type' in kwargs:
wanted = kwargs['include_type']
if not isinstance(wanted, str):
raise InvalidArguments('The `include_type` kwarg must be a string')
actual = d.get_include_type()
if wanted != actual:
mlog.debug(f'Current include type of {args[0]} is {actual}. Converting to requested {wanted}')
d = d.generate_system_dependency(wanted)
if d.feature_since is not None:
version, extra_msg = d.feature_since
FeatureNew.single_use(f'dep {d.name!r} custom lookup', version, self.subproject, extra_msg, node)
for f in d.featurechecks:
f.use(self.subproject, node)
return d
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
return Disabler()
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@FeatureNewKwargs('executable', '0.56.0', ['win_subsystem'])
@FeatureDeprecatedKwargs('executable', '0.56.0', ['gui_app'], extra_message="Use 'win_subsystem' instead.")
@permittedKwargs(build.known_exe_kwargs)
def func_executable(self, node, args, kwargs):
return self.build_target(node, args, kwargs, build.Executable)
@permittedKwargs(build.known_stlib_kwargs)
def func_static_lib(self, node, args, kwargs):
return self.build_target(node, args, kwargs, build.StaticLibrary)
@permittedKwargs(build.known_shlib_kwargs)
def func_shared_lib(self, node, args, kwargs):
holder = self.build_target(node, args, kwargs, build.SharedLibrary)
holder.shared_library_only = True
return holder
@permittedKwargs(known_library_kwargs)
def func_both_lib(self, node, args, kwargs):
return self.build_both_libraries(node, args, kwargs)
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(build.known_shmod_kwargs)
def func_shared_module(self, node, args, kwargs):
return self.build_target(node, args, kwargs, build.SharedModule)
@permittedKwargs(known_library_kwargs)
def func_library(self, node, args, kwargs):
return self.build_library(node, args, kwargs)
@permittedKwargs(build.known_jar_kwargs)
def func_jar(self, node, args, kwargs):
return self.build_target(node, args, kwargs, build.Jar)
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(known_build_target_kwargs)
def func_build_target(self, node, args, kwargs):
if 'target_type' not in kwargs:
raise InterpreterException('Missing target_type keyword argument')
target_type = kwargs.pop('target_type')
if target_type == 'executable':
return self.build_target(node, args, kwargs, build.Executable)
elif target_type == 'shared_library':
return self.build_target(node, args, kwargs, build.SharedLibrary)
elif target_type == 'shared_module':
FeatureNew('build_target(target_type: \'shared_module\')',
'0.51.0').use(self.subproject)
return self.build_target(node, args, kwargs, build.SharedModule)
elif target_type == 'static_library':
return self.build_target(node, args, kwargs, build.StaticLibrary)
elif target_type == 'both_libraries':
return self.build_both_libraries(node, args, kwargs)
elif target_type == 'library':
return self.build_library(node, args, kwargs)
elif target_type == 'jar':
return self.build_target(node, args, kwargs, build.Jar)
else:
raise InterpreterException('Unknown target_type.')
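    # vcs_tag() creates a CustomTarget that rewrites @VCS_TAG@ (or a custom
    # replace_string) in the input file with the current VCS revision, falling
    # back to the project version. Illustrative meson-DSL usage, not from this file:
    #   vcs_tag(input: 'version.h.in', output: 'version.h')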
@noPosargs
@typed_kwargs(
'vcs_tag',
CT_INPUT_KW.evolve(required=True),
CT_OUTPUT_KW,
# Cannot use the COMMAND_KW because command is allowed to be empty
KwargInfo(
'command',
ContainerTypeInfo(list, (str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex, ExternalProgram, mesonlib.File)),
listify=True,
default=[],
),
KwargInfo('fallback', (str, NoneType)),
KwargInfo('replace_string', str, default='@VCS_TAG@'),
)
def func_vcs_tag(self, node: mparser.BaseNode, args: T.List['TYPE_var'], kwargs: 'kwargs.VcsTag') -> build.CustomTarget:
if kwargs['fallback'] is None:
FeatureNew.single_use('Optional fallback in vcs_tag', '0.41.0', self.subproject, location=node)
fallback = kwargs['fallback'] or self.project_version
replace_string = kwargs['replace_string']
regex_selector = '(.*)' # default regex selector for custom command: use complete output
vcs_cmd = kwargs['command']
source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
if vcs_cmd:
if isinstance(vcs_cmd[0], mesonlib.File):
FeatureNew.single_use('vcs_tag with file as the first argument', '0.62.0', self.subproject, location=node)
maincmd = self.find_program_impl(vcs_cmd[0], required=False)
if maincmd.found():
vcs_cmd[0] = maincmd
else:
vcs = mesonlib.detect_vcs(source_dir)
if vcs:
mlog.log('Found {} repository at {}'.format(vcs['name'], vcs['wc_dir']))
vcs_cmd = vcs['get_rev'].split()
regex_selector = vcs['rev_regex']
else:
                vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force it to use the fallback string
# vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
self._validate_custom_target_outputs(len(kwargs['input']) > 1, kwargs['output'], "vcs_tag")
tg = build.CustomTarget(
kwargs['output'][0],
self.subdir,
self.subproject,
self.environment.get_build_command() +
['--internal',
'vcstagger',
'@INPUT0@',
'@OUTPUT0@',
fallback,
source_dir,
replace_string,
regex_selector] + vcs_cmd,
self.source_strings_to_files(kwargs['input']),
kwargs['output'],
build_by_default=True,
build_always_stale=True,
)
self.add_target(tg.name, tg)
return tg
@FeatureNew('subdir_done', '0.46.0')
@noPosargs
@noKwargs
def func_subdir_done(self, node, args, kwargs):
raise SubdirDoneRequest()
@staticmethod
def _validate_custom_target_outputs(has_multi_in: bool, outputs: T.Iterable[str], name: str) -> None:
"""Checks for additional invalid values in a custom_target output.
This cannot be done with typed_kwargs because it requires the number of
inputs.
"""
for out in outputs:
if has_multi_in and ('@PLAINNAME@' in out or '@BASENAME@' in out):
                raise InvalidArguments(f'{name}: output cannot contain "@PLAINNAME@" or "@BASENAME@" '
                                       'when there is more than one input (we can\'t know which to use)')
@typed_pos_args('custom_target', optargs=[str])
@typed_kwargs(
'custom_target',
COMMAND_KW,
CT_BUILD_ALWAYS,
CT_BUILD_ALWAYS_STALE,
CT_BUILD_BY_DEFAULT,
CT_INPUT_KW,
CT_INSTALL_DIR_KW,
CT_INSTALL_TAG_KW,
CT_OUTPUT_KW,
DEPENDS_KW,
DEPEND_FILES_KW,
DEPFILE_KW,
ENV_KW.evolve(since='0.57.0'),
INSTALL_KW,
INSTALL_MODE_KW.evolve(since='0.47.0'),
OVERRIDE_OPTIONS_KW,
KwargInfo('feed', bool, default=False, since='0.59.0'),
KwargInfo('capture', bool, default=False),
KwargInfo('console', bool, default=False, since='0.48.0'),
)
def func_custom_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
kwargs: 'kwargs.CustomTarget') -> build.CustomTarget:
if kwargs['depfile'] and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
FeatureNew.single_use('substitutions in custom_target depfile', '0.47.0', self.subproject, location=node)
# Don't mutate the kwargs
build_by_default = kwargs['build_by_default']
build_always_stale = kwargs['build_always_stale']
# Remap build_always to build_by_default and build_always_stale
if kwargs['build_always'] is not None and kwargs['build_always_stale'] is not None:
raise InterpreterException('CustomTarget: "build_always" and "build_always_stale" are mutually exclusive')
if build_by_default is None and kwargs['install']:
build_by_default = True
elif kwargs['build_always'] is not None:
if build_by_default is None:
build_by_default = kwargs['build_always']
            build_always_stale = kwargs['build_always']
        # These are nullable so that we can know whether they're explicitly
        # set or not. If they haven't been overwritten, set them to their true
        # default
if build_by_default is None:
build_by_default = False
if build_always_stale is None:
build_always_stale = False
name = args[0]
if name is None:
# name will default to first output, but we cannot do that yet because
# they could need substitutions (e.g. @BASENAME@) first. CustomTarget()
# will take care of setting a proper default but name must be an empty
# string in the meantime.
FeatureNew.single_use('custom_target() with no name argument', '0.60.0', self.subproject, location=node)
name = ''
inputs = self.source_strings_to_files(kwargs['input'], strict=False)
command = kwargs['command']
if command and isinstance(command[0], str):
command[0] = self.find_program_impl([command[0]])
if len(inputs) > 1 and kwargs['feed']:
raise InvalidArguments('custom_target: "feed" keyword argument can only be used used with a single input')
if len(kwargs['output']) > 1 and kwargs['capture']:
raise InvalidArguments('custom_target: "capture" keyword argument can only be used used with a single output')
if kwargs['capture'] and kwargs['console']:
raise InvalidArguments('custom_target: "capture" and "console" keyword arguments are mutually exclusive')
for c in command:
if kwargs['capture'] and isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('custom_target: "capture" keyword argument cannot be used with "@OUTPUT@"')
if kwargs['feed'] and isinstance(c, str) and '@INPUT@' in c:
raise InvalidArguments('custom_target: "feed" keyword argument cannot be used with "@INPUT@"')
if kwargs['install'] and not kwargs['install_dir']:
raise InvalidArguments('custom_target: "install_dir" keyword argument must be set when "install" is true.')
if len(kwargs['install_dir']) > 1:
FeatureNew.single_use('multiple install_dir for custom_target', '0.40.0', self.subproject, location=node)
if len(kwargs['install_tag']) not in {0, 1, len(kwargs['output'])}:
raise InvalidArguments('custom_target: install_tag argument must have 0 or 1 outputs, '
'or the same number of elements as the output keyword argument. '
f'(there are {len(kwargs["install_tag"])} install_tags, '
f'and {len(kwargs["output"])} outputs)')
self._validate_custom_target_outputs(len(inputs) > 1, kwargs['output'], "custom_target")
tg = build.CustomTarget(
name,
self.subdir,
self.subproject,
command,
inputs,
kwargs['output'],
build_always_stale=build_always_stale,
build_by_default=build_by_default,
capture=kwargs['capture'],
console=kwargs['console'],
depend_files=kwargs['depend_files'],
depfile=kwargs['depfile'],
extra_depends=kwargs['depends'],
env=kwargs['env'],
feed=kwargs['feed'],
install=kwargs['install'],
install_dir=kwargs['install_dir'],
install_mode=kwargs['install_mode'],
install_tag=kwargs['install_tag'],
override_options=kwargs['override_options'],
backend=self.backend)
self.add_target(tg.name, tg)
return tg
@typed_pos_args('run_target', str)
@typed_kwargs(
'run_target',
COMMAND_KW,
DEPENDS_KW,
ENV_KW.evolve(since='0.57.0'),
)
def func_run_target(self, node: mparser.FunctionNode, args: T.Tuple[str],
kwargs: 'kwargs.RunTarget') -> build.RunTarget:
all_args = kwargs['command'].copy()
for i in listify(all_args):
if isinstance(i, ExternalProgram) and not i.found():
raise InterpreterException(f'Tried to use non-existing executable {i.name!r}')
if isinstance(all_args[0], str):
all_args[0] = self.find_program_impl([all_args[0]])
name = args[0]
tg = build.RunTarget(name, all_args, kwargs['depends'], self.subdir, self.subproject, kwargs['env'])
self.add_target(name, tg)
full_name = (self.subproject, name)
assert full_name not in self.build.run_target_names
self.build.run_target_names.add(full_name)
return tg
@FeatureNew('alias_target', '0.52.0')
@typed_pos_args('alias_target', str, varargs=build.Target, min_varargs=1)
@noKwargs
def func_alias_target(self, node: mparser.BaseNode, args: T.Tuple[str, T.List[build.Target]],
kwargs: 'TYPE_kwargs') -> build.AliasTarget:
name, deps = args
tg = build.AliasTarget(name, deps, self.subdir, self.subproject)
self.add_target(name, tg)
return tg
@typed_pos_args('generator', (build.Executable, ExternalProgram))
@typed_kwargs(
'generator',
KwargInfo('arguments', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
KwargInfo('output', ContainerTypeInfo(list, str, allow_empty=False), required=True, listify=True),
DEPFILE_KW,
DEPENDS_KW,
KwargInfo('capture', bool, default=False, since='0.43.0'),
)
def func_generator(self, node: mparser.FunctionNode,
args: T.Tuple[T.Union[build.Executable, ExternalProgram]],
kwargs: 'kwargs.FuncGenerator') -> build.Generator:
for rule in kwargs['output']:
if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
if has_path_sep(rule):
raise InvalidArguments('"output" must not contain a directory separator.')
if len(kwargs['output']) > 1:
for o in kwargs['output']:
if '@OUTPUT@' in o:
raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
gen = build.Generator(args[0], **kwargs)
self.generators.append(gen)
return gen
@typed_pos_args('benchmark', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('benchmark', *TEST_KWARGS)
def func_benchmark(self, node: mparser.BaseNode,
args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
kwargs: 'kwargs.FuncBenchmark') -> None:
self.add_test(node, args, kwargs, False)
@typed_pos_args('test', str, (build.Executable, build.Jar, ExternalProgram, mesonlib.File))
@typed_kwargs('test', *TEST_KWARGS, KwargInfo('is_parallel', bool, default=True))
def func_test(self, node: mparser.BaseNode,
args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
kwargs: 'kwargs.FuncTest') -> None:
self.add_test(node, args, kwargs, True)
def unpack_env_kwarg(self, kwargs: T.Union[build.EnvironmentVariables, T.Dict[str, 'TYPE_var'], T.List['TYPE_var'], str]) -> build.EnvironmentVariables:
envlist = kwargs.get('env')
if envlist is None:
return build.EnvironmentVariables()
msg = ENV_KW.validator(envlist)
if msg:
raise InvalidArguments(f'"env": {msg}')
return ENV_KW.convertor(envlist)
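    # Builds the Test object shared by test() and benchmark(): a ':' in the test
    # name is deprecated and replaced with '_', File arguments are resolved to
    # programs, and each test is placed in a '<project>' or '<project>:<suite>'
    # suite name.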
def make_test(self, node: mparser.BaseNode,
args: T.Tuple[str, T.Union[build.Executable, build.Jar, ExternalProgram, mesonlib.File]],
kwargs: 'kwargs.BaseTest') -> Test:
name = args[0]
if ':' in name:
mlog.deprecation(f'":" is not allowed in test name "{name}", it has been replaced with "_"',
location=node)
name = name.replace(':', '_')
exe = args[1]
if isinstance(exe, ExternalProgram):
if not exe.found():
raise InvalidArguments('Tried to use not-found external program as test exe')
elif isinstance(exe, mesonlib.File):
exe = self.find_program_impl([exe])
env = self.unpack_env_kwarg(kwargs)
if kwargs['timeout'] <= 0:
FeatureNew.single_use('test() timeout <= 0', '0.57.0', self.subproject, location=node)
prj = self.subproject if self.is_subproject() else self.build.project_name
suite: T.List[str] = []
for s in kwargs['suite']:
if s:
s = ':' + s
suite.append(prj.replace(' ', '_').replace(':', '_') + s)
return Test(name,
prj,
suite,
exe,
kwargs['depends'],
kwargs.get('is_parallel', False),
kwargs['args'],
env,
kwargs['should_fail'],
kwargs['timeout'],
kwargs['workdir'],
kwargs['protocol'],
kwargs['priority'],
kwargs['verbose'])
def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool):
t = self.make_test(node, args, kwargs)
if is_base_test:
self.build.tests.append(t)
mlog.debug('Adding test', mlog.bold(t.name, True))
else:
self.build.benchmarks.append(t)
mlog.debug('Adding benchmark', mlog.bold(t.name, True))
@typed_pos_args('install_headers', varargs=(str, mesonlib.File))
@typed_kwargs(
'install_headers',
KwargInfo('install_dir', (str, NoneType)),
KwargInfo('subdir', (str, NoneType)),
INSTALL_MODE_KW.evolve(since='0.47.0'),
)
def func_install_headers(self, node: mparser.BaseNode,
args: T.Tuple[T.List['mesonlib.FileOrString']],
kwargs: 'kwargs.FuncInstallHeaders') -> build.Headers:
source_files = self.source_strings_to_files(args[0])
install_subdir = kwargs['subdir']
if install_subdir is not None:
if kwargs['install_dir'] is not None:
raise InterpreterException('install_headers: cannot specify both "install_dir" and "subdir". Use only "install_dir".')
if os.path.isabs(install_subdir):
mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')
h = build.Headers(source_files, install_subdir, kwargs['install_dir'],
kwargs['install_mode'], self.subproject)
self.build.headers.append(h)
return h
@typed_pos_args('install_man', varargs=(str, mesonlib.File))
@typed_kwargs(
'install_man',
KwargInfo('install_dir', (str, NoneType)),
KwargInfo('locale', (str, NoneType), since='0.58.0'),
INSTALL_MODE_KW.evolve(since='0.47.0')
)
def func_install_man(self, node: mparser.BaseNode,
args: T.Tuple[T.List['mesonlib.FileOrString']],
kwargs: 'kwargs.FuncInstallMan') -> build.Man:
        # We just need to narrow this, because the input is limited to files and
        # strings, so only Files will be returned
sources = self.source_strings_to_files(args[0])
for s in sources:
try:
num = int(s.rsplit('.', 1)[-1])
except (IndexError, ValueError):
num = 0
if not 1 <= num <= 9:
raise InvalidArguments('Man file must have a file extension of a number between 1 and 9')
m = build.Man(sources, kwargs['install_dir'], kwargs['install_mode'],
self.subproject, kwargs['locale'])
self.build.man.append(m)
return m
@FeatureNew('install_emptydir', '0.60.0')
@typed_kwargs(
'install_emptydir',
INSTALL_MODE_KW,
KwargInfo('install_tag', (str, NoneType), since='0.62.0')
)
def func_install_emptydir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs) -> None:
d = build.EmptyDir(args[0], kwargs['install_mode'], self.subproject, kwargs['install_tag'])
self.build.emptydir.append(d)
return d
@FeatureNew('install_symlink', '0.61.0')
@typed_pos_args('symlink_name', str)
@typed_kwargs(
'install_symlink',
KwargInfo('pointing_to', str, required=True),
KwargInfo('install_dir', str, required=True),
INSTALL_TAG_KW,
)
def func_install_symlink(self, node: mparser.BaseNode,
args: T.Tuple[T.List[str]],
kwargs) -> build.SymlinkData:
name = args[0] # Validation while creating the SymlinkData object
target = kwargs['pointing_to']
l = build.SymlinkData(target, name, kwargs['install_dir'],
self.subproject, kwargs['install_tag'])
self.build.symlinks.append(l)
return l
@FeatureNew('structured_sources', '0.62.0')
@typed_pos_args('structured_sources', object, optargs=[dict])
@noKwargs
@noArgsFlattening
def func_structured_sources(
self, node: mparser.BaseNode,
args: T.Tuple[object, T.Optional[T.Dict[str, object]]],
kwargs: 'TYPE_kwargs') -> build.StructuredSources:
        valid_types = (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)
sources: T.Dict[str, T.List[T.Union[mesonlib.File, 'build.GeneratedTypes']]] = collections.defaultdict(list)
for arg in mesonlib.listify(args[0]):
if not isinstance(arg, valid_types):
raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
if isinstance(arg, str):
arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
sources[''].append(arg)
if args[1]:
if '' in args[1]:
raise InvalidArguments('structured_sources: keys to dictionary argument may not be an empty string.')
for k, v in args[1].items():
for arg in mesonlib.listify(v):
if not isinstance(arg, valid_types):
raise InvalidArguments(f'structured_sources: type "{type(arg)}" is not valid')
if isinstance(arg, str):
arg = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, arg)
sources[k].append(arg)
return build.StructuredSources(sources)
@typed_pos_args('subdir', str)
@typed_kwargs(
'subdir',
KwargInfo(
'if_found',
ContainerTypeInfo(list, object),
validator=lambda a: 'Objects must have a found() method' if not all(hasattr(x, 'found') for x in a) else None,
since='0.44.0',
default=[],
listify=True,
),
)
def func_subdir(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.Subdir') -> None:
mesonlib.check_direntry_issues(args)
if '..' in args[0]:
raise InvalidArguments('Subdir contains ..')
if self.subdir == '' and args[0] == self.subproject_dir:
raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
if self.subdir == '' and args[0].startswith('meson-'):
raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
if args[0] == '':
raise InvalidArguments("The argument given to subdir() is the empty string ''. This is prohibited.")
for i in kwargs['if_found']:
if not i.found():
return
prev_subdir = self.subdir
subdir = os.path.join(prev_subdir, args[0])
if os.path.isabs(subdir):
raise InvalidArguments('Subdir argument must be a relative path.')
absdir = os.path.join(self.environment.get_source_dir(), subdir)
symlinkless_dir = os.path.realpath(absdir)
build_file = os.path.join(symlinkless_dir, 'meson.build')
if build_file in self.processed_buildfiles:
raise InvalidArguments(f'Tried to enter directory "{subdir}", which has already been visited.')
self.processed_buildfiles.add(build_file)
self.subdir = subdir
os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
buildfilename = os.path.join(self.subdir, environment.build_filename)
self.build_def_files.add(buildfilename)
absname = os.path.join(self.environment.get_source_dir(), buildfilename)
if not os.path.isfile(absname):
self.subdir = prev_subdir
raise InterpreterException(f"Non-existent build file '{buildfilename!s}'")
with open(absname, encoding='utf-8') as f:
code = f.read()
assert isinstance(code, str)
try:
codeblock = mparser.Parser(code, absname).parse()
except mesonlib.MesonException as me:
me.file = absname
raise me
try:
self.evaluate_codeblock(codeblock)
except SubdirDoneRequest:
pass
self.subdir = prev_subdir
def _get_kwarg_install_mode(self, kwargs: T.Dict[str, T.Any]) -> T.Optional[FileMode]:
if kwargs.get('install_mode', None) is None:
return None
if isinstance(kwargs['install_mode'], FileMode):
return kwargs['install_mode']
install_mode: T.List[str] = []
mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
for m in mode:
# We skip any arguments that are set to `false`
if m is False:
m = None
install_mode.append(m)
if len(install_mode) > 3:
raise InvalidArguments('Keyword argument install_mode takes at '
'most 3 arguments.')
if len(install_mode) > 0 and install_mode[0] is not None and \
not isinstance(install_mode[0], str):
raise InvalidArguments('Keyword argument install_mode requires the '
'permissions arg to be a string or false')
return FileMode(*install_mode)
@typed_pos_args('install_data', varargs=(str, mesonlib.File))
@typed_kwargs(
'install_data',
KwargInfo('install_dir', (str, NoneType)),
KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), listify=True, default=[]),
KwargInfo('rename', ContainerTypeInfo(list, str), default=[], listify=True, since='0.46.0'),
INSTALL_MODE_KW.evolve(since='0.38.0'),
INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_data(self, node: mparser.BaseNode,
args: T.Tuple[T.List['mesonlib.FileOrString']],
kwargs: 'kwargs.FuncInstallData') -> build.Data:
sources = self.source_strings_to_files(args[0] + kwargs['sources'])
rename = kwargs['rename'] or None
if rename:
if len(rename) != len(sources):
raise InvalidArguments(
'"rename" and "sources" argument lists must be the same length if "rename" is given. '
f'Rename has {len(rename)} elements and sources has {len(sources)}.')
install_dir_name = kwargs['install_dir']
if install_dir_name:
if not os.path.isabs(install_dir_name):
install_dir_name = os.path.join('{datadir}', install_dir_name)
else:
install_dir_name = '{datadir}'
return self.install_data_impl(sources, kwargs['install_dir'], kwargs['install_mode'],
rename, kwargs['install_tag'], install_dir_name)
def install_data_impl(self, sources: T.List[mesonlib.File], install_dir: str,
install_mode: FileMode, rename: T.Optional[str],
tag: T.Optional[str],
install_dir_name: T.Optional[str] = None,
install_data_type: T.Optional[str] = None) -> build.Data:
"""Just the implementation with no validation."""
data = build.Data(sources, install_dir, install_dir_name or install_dir, install_mode,
self.subproject, rename, tag, install_data_type)
self.build.data.append(data)
return data
@typed_pos_args('install_subdir', str)
@typed_kwargs(
'install_subdir',
KwargInfo('install_dir', str, required=True),
KwargInfo('strip_directory', bool, default=False),
KwargInfo('exclude_files', ContainerTypeInfo(list, str),
default=[], listify=True, since='0.42.0',
validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
KwargInfo('exclude_directories', ContainerTypeInfo(list, str),
default=[], listify=True, since='0.42.0',
validator=lambda x: 'cannot be absolute' if any(os.path.isabs(d) for d in x) else None),
INSTALL_MODE_KW.evolve(since='0.38.0'),
INSTALL_TAG_KW.evolve(since='0.60.0'),
)
def func_install_subdir(self, node: mparser.BaseNode, args: T.Tuple[str],
kwargs: 'kwargs.FuncInstallSubdir') -> build.InstallDir:
exclude = (set(kwargs['exclude_files']), set(kwargs['exclude_directories']))
idir = build.InstallDir(
self.subdir,
args[0],
kwargs['install_dir'],
kwargs['install_mode'],
exclude,
kwargs['strip_directory'],
self.subproject,
install_tag=kwargs['install_tag'])
self.build.install_dirs.append(idir)
return idir
@noPosargs
@typed_kwargs(
'configure_file',
DEPFILE_KW.evolve(since='0.52.0'),
        INSTALL_MODE_KW.evolve(since='0.47.0'),
INSTALL_TAG_KW.evolve(since='0.60.0'),
KwargInfo('capture', bool, default=False, since='0.41.0'),
KwargInfo(
'command',
(ContainerTypeInfo(list, (build.Executable, ExternalProgram, compilers.Compiler, mesonlib.File, str), allow_empty=False), NoneType),
listify=True,
),
KwargInfo(
'configuration',
(ContainerTypeInfo(dict, (str, int, bool)), build.ConfigurationData, NoneType),
),
KwargInfo('copy', bool, default=False, since='0.47.0'),
KwargInfo('encoding', str, default='utf-8', since='0.47.0'),
KwargInfo('format', str, default='meson', since='0.46.0',
validator=in_set_validator({'meson', 'cmake', 'cmake@'})),
KwargInfo(
'input',
ContainerTypeInfo(list, (mesonlib.File, str)),
listify=True,
default=[],
),
# Cannot use shared implementation until None backwards compat is dropped
KwargInfo('install', (bool, NoneType), since='0.50.0'),
KwargInfo('install_dir', (str, bool), default='',
validator=lambda x: 'must be `false` if boolean' if x is True else None),
KwargInfo('output', str, required=True),
KwargInfo('output_format', str, default='c', since='0.47.0',
validator=in_set_validator({'c', 'nasm'})),
)
def func_configure_file(self, node: mparser.BaseNode, args: T.List[TYPE_var],
kwargs: kwargs.ConfigureFile):
actions = sorted(x for x in {'configuration', 'command', 'copy'}
if kwargs[x] not in [None, False])
num_actions = len(actions)
if num_actions == 0:
raise InterpreterException('Must specify an action with one of these '
'keyword arguments: \'configuration\', '
'\'command\', or \'copy\'.')
elif num_actions == 2:
raise InterpreterException('Must not specify both {!r} and {!r} '
'keyword arguments since they are '
'mutually exclusive.'.format(*actions))
elif num_actions == 3:
raise InterpreterException('Must specify one of {!r}, {!r}, and '
'{!r} keyword arguments since they are '
'mutually exclusive.'.format(*actions))
if kwargs['capture'] and not kwargs['command']:
raise InvalidArguments('configure_file: "capture" keyword requires "command" keyword.')
fmt = kwargs['format']
output_format = kwargs['output_format']
depfile = kwargs['depfile']
# Validate input
inputs = self.source_strings_to_files(kwargs['input'])
inputs_abs = []
for f in inputs:
if isinstance(f, mesonlib.File):
inputs_abs.append(f.absolute_path(self.environment.source_dir,
self.environment.build_dir))
self.add_build_def_file(f)
else:
raise InterpreterException('Inputs can only be strings or file objects')
# Validate output
output = kwargs['output']
if inputs_abs:
values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
outputs = mesonlib.substitute_values([output], values)
output = outputs[0]
if depfile:
depfile = mesonlib.substitute_values([depfile], values)[0]
ofile_rpath = os.path.join(self.subdir, output)
if ofile_rpath in self.configure_file_outputs:
mesonbuildfile = os.path.join(self.subdir, 'meson.build')
current_call = f"{mesonbuildfile}:{self.current_lineno}"
first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
else:
self.configure_file_outputs[ofile_rpath] = self.current_lineno
if os.path.dirname(output) != '':
raise InterpreterException('Output file name must not contain a subdirectory.')
(ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
# Perform the appropriate action
if kwargs['configuration'] is not None:
conf = kwargs['configuration']
if isinstance(conf, dict):
FeatureNew.single_use('configure_file.configuration dictionary', '0.49.0', self.subproject, location=node)
for k, v in conf.items():
if not isinstance(v, (str, int, bool)):
raise InvalidArguments(
f'"configuration_data": initial value dictionary key "{k!r}"" must be "str | int | bool", not "{v!r}"')
conf = build.ConfigurationData(conf)
mlog.log('Configuring', mlog.bold(output), 'using configuration')
if len(inputs) > 1:
                raise InterpreterException('At most one input file can be given in configuration mode')
if inputs:
os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
file_encoding = kwargs['encoding']
missing_variables, confdata_useless = \
mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf,
fmt, file_encoding)
if missing_variables:
var_list = ", ".join(map(repr, sorted(missing_variables)))
mlog.warning(
f"The variable(s) {var_list} in the input file '{inputs[0]}' are not "
"present in the given configuration data.", location=node)
if confdata_useless:
ifbase = os.path.basename(inputs_abs[0])
tv = FeatureNew.get_target_version(self.subproject)
if FeatureNew.check_version(tv, '0.47.0'):
mlog.warning('Got an empty configuration_data() object and found no '
f'substitutions in the input file {ifbase!r}. If you want to '
'copy a file to the build dir, use the \'copy:\' keyword '
'argument added in 0.47.0', location=node)
else:
mesonlib.dump_conf_header(ofile_abs, conf, output_format)
conf.used = True
elif kwargs['command'] is not None:
if len(inputs) > 1:
FeatureNew.single_use('multiple inputs in configure_file()', '0.52.0', self.subproject, location=node)
# We use absolute paths for input and output here because the cwd
# that the command is run from is 'unspecified', so it could change.
# Currently it's builddir/subdir for in_builddir else srcdir/subdir.
values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
if depfile:
depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
values['@DEPFILE@'] = depfile
# Substitute @INPUT@, @OUTPUT@, etc here.
_cmd = mesonlib.substitute_values(kwargs['command'], values)
mlog.log('Configuring', mlog.bold(output), 'with command')
cmd, *args = _cmd
res = self.run_command_impl(node, (cmd, args),
{'capture': True, 'check': True, 'env': build.EnvironmentVariables()},
True)
if kwargs['capture']:
dst_tmp = ofile_abs + '~'
file_encoding = kwargs['encoding']
with open(dst_tmp, 'w', encoding=file_encoding) as f:
f.writelines(res.stdout)
if inputs_abs:
shutil.copymode(inputs_abs[0], dst_tmp)
mesonlib.replace_if_different(ofile_abs, dst_tmp)
if depfile:
mlog.log('Reading depfile:', mlog.bold(depfile))
with open(depfile, encoding='utf-8') as f:
df = DepFile(f.readlines())
deps = df.get_all_dependencies(ofile_fname)
for dep in deps:
self.add_build_def_file(dep)
elif kwargs['copy']:
if len(inputs_abs) != 1:
raise InterpreterException('Exactly one input file must be given in copy mode')
os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
shutil.copy2(inputs_abs[0], ofile_abs)
# Install file if requested, we check for the empty string
# for backwards compatibility. That was the behaviour before
# 0.45.0 so preserve it.
idir = kwargs['install_dir']
if idir is False:
idir = ''
FeatureDeprecated.single_use('configure_file install_dir: false', '0.50.0',
self.subproject, 'Use the `install:` kwarg instead', location=node)
install = kwargs['install'] if kwargs['install'] is not None else idir != ''
if install:
if not idir:
raise InterpreterException(
'"install_dir" must be specified when "install" in a configure_file is true')
cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
install_mode = kwargs['install_mode']
install_tag = kwargs['install_tag']
self.build.data.append(build.Data([cfile], idir, idir, install_mode, self.subproject,
install_tag=install_tag, data_type='configure'))
return mesonlib.File.from_built_file(self.subdir, output)
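    # For orientation, the typical meson.build usage handled by the function above
    # looks like the following (a sketch only; file and variable names are
    # illustrative, not taken from any particular project):
    #
    #     conf_data = configuration_data()
    #     conf_data.set('VERSION', '1.2.3')
    #     configure_file(input : 'config.h.in',
    #                    output : 'config.h',
    #                    configuration : conf_data)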
def extract_incdirs(self, kwargs, key: str = 'include_directories'):
prospectives = extract_as_list(kwargs, key)
result = []
for p in prospectives:
if isinstance(p, build.IncludeDirs):
result.append(p)
elif isinstance(p, str):
result.append(self.build_incdir_object([p]))
else:
raise InterpreterException('Include directory objects can only be created from strings or include directories.')
return result
@typed_pos_args('include_directories', varargs=str)
@typed_kwargs('include_directories', KwargInfo('is_system', bool, default=False))
def func_include_directories(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]],
kwargs: 'kwargs.FuncIncludeDirectories') -> build.IncludeDirs:
return self.build_incdir_object(args[0], kwargs['is_system'])
def build_incdir_object(self, incdir_strings: T.List[str], is_system: bool = False) -> build.IncludeDirs:
if not isinstance(is_system, bool):
raise InvalidArguments('Is_system must be boolean.')
src_root = self.environment.get_source_dir()
build_root = self.environment.get_build_dir()
absbase_src = os.path.join(src_root, self.subdir)
absbase_build = os.path.join(build_root, self.subdir)
for a in incdir_strings:
if a.startswith(src_root):
raise InvalidArguments(textwrap.dedent('''\
Tried to form an absolute path to a source dir.
You should not do that but use relative paths instead.
To get include path to any directory relative to the current dir do
incdir = include_directories(dirname)
After this incdir will contain both the current source dir as well as the
corresponding build dir. It can then be used in any subdirectory and
Meson will take care of all the busywork to make paths work.
Dirname can even be '.' to mark the current directory. Though you should
remember that the current source and build directories are always
put in the include directories by default so you only need to do
include_directories('.') if you intend to use the result in a
different subdirectory.
'''))
else:
try:
self.validate_within_subproject(self.subdir, a)
except InterpreterException:
mlog.warning('include_directories sandbox violation!', location=self.current_node)
print(textwrap.dedent(f'''\
The project is trying to access the directory {a!r} which belongs to a different
subproject. This is a problem as it hardcodes the relative paths of these two projects.
This makes it impossible to compile the project in any other directory layout and also
prevents the subproject from changing its own directory layout.
Instead of poking directly at the internals the subproject should be executed and
it should set a variable that the caller can then use. Something like:
# In subproject
some_dep = declare_dependency(include_directories: include_directories('include'))
# In subproject wrap file
[provide]
some = some_dep
# In parent project
some_dep = dependency('some')
executable(..., dependencies: [some_dep])
This warning will become a hard error in a future Meson release.
'''))
absdir_src = os.path.join(absbase_src, a)
absdir_build = os.path.join(absbase_build, a)
if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
raise InvalidArguments(f'Include dir {a} does not exist.')
i = build.IncludeDirs(self.subdir, incdir_strings, is_system)
return i
@typed_pos_args('add_test_setup', str)
@typed_kwargs(
'add_test_setup',
KwargInfo('exe_wrapper', ContainerTypeInfo(list, (str, ExternalProgram)), listify=True, default=[]),
KwargInfo('gdb', bool, default=False),
KwargInfo('timeout_multiplier', int, default=1),
KwargInfo('exclude_suites', ContainerTypeInfo(list, str), listify=True, default=[], since='0.57.0'),
KwargInfo('is_default', bool, default=False, since='0.49.0'),
ENV_KW,
)
def func_add_test_setup(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'kwargs.AddTestSetup') -> None:
setup_name = args[0]
if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
raise InterpreterException('Setup name may only contain alphanumeric characters.')
if ":" not in setup_name:
setup_name = f'{(self.subproject if self.subproject else self.build.project_name)}:{setup_name}'
exe_wrapper: T.List[str] = []
for i in kwargs['exe_wrapper']:
if isinstance(i, str):
exe_wrapper.append(i)
else:
if not i.found():
raise InterpreterException('Tried to use non-found executable.')
exe_wrapper += i.get_command()
timeout_multiplier = kwargs['timeout_multiplier']
if timeout_multiplier <= 0:
FeatureNew('add_test_setup() timeout_multiplier <= 0', '0.57.0').use(self.subproject)
if kwargs['is_default']:
if self.build.test_setup_default_name is not None:
raise InterpreterException(f'{self.build.test_setup_default_name!r} is already set as default. '
'is_default can be set to true only once')
self.build.test_setup_default_name = setup_name
self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, kwargs['gdb'], timeout_multiplier, kwargs['env'],
kwargs['exclude_suites'])
@typed_pos_args('add_global_arguments', varargs=str)
@typed_kwargs('add_global_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_global_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_global_arguments(node, self.build.global_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_global_link_arguments', varargs=str)
    @typed_kwargs('add_global_link_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_global_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_global_arguments(node, self.build.global_link_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_project_arguments', varargs=str)
@typed_kwargs('add_project_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_project_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_project_arguments(node, self.build.projects_args[kwargs['native']], args[0], kwargs)
@typed_pos_args('add_project_link_arguments', varargs=str)
    @typed_kwargs('add_project_link_arguments', NATIVE_KW, LANGUAGE_KW)
def func_add_project_link_arguments(self, node: mparser.FunctionNode, args: T.Tuple[T.List[str]], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
self._add_project_arguments(node, self.build.projects_link_args[kwargs['native']], args[0], kwargs)
def _warn_about_builtin_args(self, args: T.List[str]) -> None:
# -Wpedantic is deliberately not included, since some people want to use it but not use -Wextra
# see e.g.
# https://github.com/mesonbuild/meson/issues/3275#issuecomment-641354956
# https://github.com/mesonbuild/meson/issues/3742
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra')
optargs = ('-O0', '-O2', '-O3', '-Os', '-Oz', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning(f'Consider using the built-in warning_level option instead of using "{arg}".',
location=self.current_node)
elif arg in optargs:
mlog.warning(f'Consider using the built-in optimization level instead of using "{arg}".',
location=self.current_node)
elif arg == '-Werror':
mlog.warning(f'Consider using the built-in werror option instead of using "{arg}".',
location=self.current_node)
elif arg == '-g':
mlog.warning(f'Consider using the built-in debug option instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning(f'Consider using the built-in option for sanitizers instead of using "{arg}".',
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning(f'Consider using the built-in option for language standard version instead of using "{arg}".',
location=self.current_node)
def _add_global_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
if self.is_subproject():
msg = f'Function \'{node.func_name}\' cannot be used in subprojects because ' \
'there is no way to make that reliable.\nPlease only call ' \
'this if is_subproject() returns false. Alternatively, ' \
'define a variable that\ncontains your language-specific ' \
'arguments and add it to the appropriate *_args kwarg ' \
'in each target.'
raise InvalidCode(msg)
frozen = self.project_args_frozen or self.global_args_frozen
self._add_arguments(node, argsdict, frozen, args, kwargs)
def _add_project_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.Dict[str, T.List[str]]],
args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
if self.subproject not in argsdict:
argsdict[self.subproject] = {}
self._add_arguments(node, argsdict[self.subproject],
self.project_args_frozen, args, kwargs)
def _add_arguments(self, node: mparser.FunctionNode, argsdict: T.Dict[str, T.List[str]],
args_frozen: bool, args: T.List[str], kwargs: 'kwargs.FuncAddProjectArgs') -> None:
if args_frozen:
msg = f'Tried to use \'{node.func_name}\' after a build target has been declared.\n' \
'This is not permitted. Please declare all arguments before your targets.'
raise InvalidCode(msg)
self._warn_about_builtin_args(args)
for lang in kwargs['language']:
argsdict[lang] = argsdict.get(lang, []) + args
@noArgsFlattening
@typed_pos_args('environment', optargs=[(str, list, dict)])
@typed_kwargs('environment', ENV_METHOD_KW, ENV_SEPARATOR_KW.evolve(since='0.62.0'))
def func_environment(self, node: mparser.FunctionNode, args: T.Tuple[T.Union[None, str, T.List['TYPE_var'], T.Dict[str, 'TYPE_var']]],
kwargs: 'TYPE_kwargs') -> build.EnvironmentVariables:
init = args[0]
if init is not None:
FeatureNew.single_use('environment positional arguments', '0.52.0', self.subproject, location=node)
msg = ENV_KW.validator(init)
if msg:
raise InvalidArguments(f'"environment": {msg}')
if isinstance(init, dict) and any(i for i in init.values() if isinstance(i, list)):
FeatureNew.single_use('List of string in dictionary value', '0.62.0', self.subproject, location=node)
return env_convertor_with_method(init, kwargs['method'], kwargs['separator'])
return build.EnvironmentVariables()
@typed_pos_args('join_paths', varargs=str, min_varargs=1)
@noKwargs
def func_join_paths(self, node: mparser.BaseNode, args: T.Tuple[T.List[str]], kwargs: 'TYPE_kwargs') -> str:
return os.path.join(*args[0]).replace('\\', '/')
def run(self) -> None:
super().run()
mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
FeatureNew.report(self.subproject)
FeatureDeprecated.report(self.subproject)
if not self.is_subproject():
self.print_extra_warnings()
if self.subproject == '':
self._print_summary()
def print_extra_warnings(self) -> None:
# TODO cross compilation
for c in self.coredata.compilers.host.values():
if c.get_id() == 'clang':
self.check_clang_asan_lundef()
break
def check_clang_asan_lundef(self) -> None:
if OptionKey('b_lundef') not in self.coredata.options:
return
if OptionKey('b_sanitize') not in self.coredata.options:
return
if (self.coredata.options[OptionKey('b_lundef')].value and
self.coredata.options[OptionKey('b_sanitize')].value != 'none'):
mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.options[OptionKey('b_sanitize')].value),
location=self.current_node)
# Check that the indicated file is within the same subproject
# as we currently are. This is to stop people doing
# nasty things like:
#
# f = files('../../master_src/file.c')
#
# Note that this is validated only when the file
# object is generated. The result can be used in a different
# subproject than it is defined in (due to e.g. a
# declare_dependency).
def validate_within_subproject(self, subdir, fname):
srcdir = Path(self.environment.source_dir)
norm = Path(srcdir, subdir, fname).resolve()
if os.path.isdir(norm):
inputtype = 'directory'
else:
inputtype = 'file'
if srcdir not in norm.parents:
# Grabbing files outside the source tree is ok.
# This is for vendor stuff like:
#
# /opt/vendorsdk/src/file_with_license_restrictions.c
return
project_root = Path(srcdir, self.root_subdir)
subproject_dir = project_root / self.subproject_dir
if norm == project_root:
return
if project_root not in norm.parents:
raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} outside current (sub)project.')
if subproject_dir == norm or subproject_dir in norm.parents:
raise InterpreterException(f'Sandbox violation: Tried to grab {inputtype} {norm.name} from a nested subproject.')
@T.overload
def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = True) -> T.List['mesonlib.File']: ...
@T.overload
def source_strings_to_files(self, sources: T.List['mesonlib.FileOrString'], strict: bool = False) -> T.List['mesonlib.FileOrString']: ... # noqa: F811
@T.overload
def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: ... # noqa: F811
def source_strings_to_files(self, sources: T.List['SourceInputs'], strict: bool = True) -> T.List['SourceOutputs']: # noqa: F811
"""Lower inputs to a list of Targets and Files, replacing any strings.
:param sources: A raw (Meson DSL) list of inputs (targets, files, and
strings)
:raises InterpreterException: if any of the inputs are of an invalid type
:return: A list of Targets and Files
"""
mesonlib.check_direntry_issues(sources)
if not isinstance(sources, list):
sources = [sources]
results: T.List['SourceOutputs'] = []
for s in sources:
if isinstance(s, str):
if not strict and s.startswith(self.environment.get_build_dir()):
results.append(s)
mlog.warning(f'Source item {s!r} cannot be converted to File object, because it is a generated file. '
'This will become a hard error in the future.', location=self.current_node)
else:
self.validate_within_subproject(self.subdir, s)
results.append(mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s))
elif isinstance(s, mesonlib.File):
results.append(s)
elif isinstance(s, (build.GeneratedList, build.BuildTarget,
build.CustomTargetIndex, build.CustomTarget,
build.ExtractedObjects, build.StructuredSources)):
results.append(s)
else:
raise InterpreterException(f'Source item is {s!r} instead of '
'string or File-type object')
return results
def add_target(self, name, tobj):
if name == '':
raise InterpreterException('Target name must not be empty.')
if name.strip() == '':
raise InterpreterException('Target name must not consist only of whitespace.')
if has_path_sep(name):
pathseg = os.path.join(self.subdir, os.path.split(name)[0])
if os.path.exists(os.path.join(self.source_root, pathseg)):
raise InvalidArguments(textwrap.dedent(f'''\
Target "{name}" has a path segment pointing to directory "{pathseg}". This is an error.
To define a target that builds in that directory you must define it
in the meson.build file in that directory.
'''))
if name.startswith('meson-'):
raise InvalidArguments("Target names starting with 'meson-' are reserved "
"for Meson's internal use. Please rename.")
if name in coredata.FORBIDDEN_TARGET_NAMES:
raise InvalidArguments(f"Target name '{name}' is reserved for Meson's "
"internal use. Please rename.")
# To permit an executable and a shared library to have the
# same name, such as "foo.exe" and "libfoo.a".
idname = tobj.get_id()
if idname in self.build.targets:
raise InvalidCode(f'Tried to create target "{name}", but a target of that name already exists.')
self.build.targets[idname] = tobj
if idname not in self.coredata.target_guids:
self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
@FeatureNew('both_libraries', '0.46.0')
def build_both_libraries(self, node, args, kwargs):
shared_lib = self.build_target(node, args, kwargs, build.SharedLibrary)
# Check if user forces non-PIC static library.
pic = True
key = OptionKey('b_staticpic')
if 'pic' in kwargs:
pic = kwargs['pic']
elif key in self.environment.coredata.options:
pic = self.environment.coredata.options[key].value
if self.backend.name == 'xcode':
# Xcode is a bit special in that you can't (at least for the moment)
# form a library only from object file inputs. The simple but inefficient
# solution is to use the sources directly. This will lead to them being
# built twice. This is unfortunate and slow, but at least it works.
# Feel free to submit patches to get this fixed if it is an
# issue for you.
reuse_object_files = False
else:
reuse_object_files = pic
if reuse_object_files:
# Exclude sources from args and kwargs to avoid building them twice
static_args = [args[0]]
static_kwargs = kwargs.copy()
static_kwargs['sources'] = []
static_kwargs['objects'] = shared_lib.extract_all_objects()
else:
static_args = args
static_kwargs = kwargs
static_lib = self.build_target(node, static_args, static_kwargs, build.StaticLibrary)
return build.BothLibraries(shared_lib, static_lib)
def build_library(self, node, args, kwargs):
default_library = self.coredata.get_option(OptionKey('default_library', subproject=self.subproject))
if default_library == 'shared':
return self.build_target(node, args, kwargs, build.SharedLibrary)
elif default_library == 'static':
return self.build_target(node, args, kwargs, build.StaticLibrary)
elif default_library == 'both':
return self.build_both_libraries(node, args, kwargs)
else:
raise InterpreterException(f'Unknown default_library value: {default_library}.')
def build_target(self, node: mparser.BaseNode, args, kwargs, targetclass):
@FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
@FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
def build_target_decorator_caller(self, node, args, kwargs):
return True
build_target_decorator_caller(self, node, args, kwargs)
if not args:
raise InterpreterException('Target does not have a name.')
name, *sources = args
for_machine = self.machine_from_native_kwarg(kwargs)
if 'sources' in kwargs:
sources += listify(kwargs['sources'])
sources = self.source_strings_to_files(sources)
objs = extract_as_list(kwargs, 'objects')
kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
if 'extra_files' in kwargs:
ef = extract_as_list(kwargs, 'extra_files')
kwargs['extra_files'] = self.source_strings_to_files(ef)
self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
if targetclass not in {build.Executable, build.SharedLibrary, build.SharedModule, build.StaticLibrary, build.Jar}:
mlog.debug('Unknown target type:', str(targetclass))
raise RuntimeError('Unreachable code')
self.kwarg_strings_to_includedirs(kwargs)
# Filter out kwargs from other target types. For example 'soversion'
# passed to library() when default_library == 'static'.
kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}
srcs: T.List['SourceInputs'] = []
struct: T.Optional[build.StructuredSources] = build.StructuredSources()
for s in sources:
if isinstance(s, build.StructuredSources):
struct = struct + s
else:
srcs.append(s)
if not struct:
struct = None
else:
# Validate that we won't end up with two outputs with the same name.
# i.e, don't allow:
# [structured_sources('foo/bar.rs'), structured_sources('bar/bar.rs')]
for v in struct.sources.values():
outputs: T.Set[str] = set()
for f in v:
o: T.List[str]
if isinstance(f, str):
o = [os.path.basename(f)]
elif isinstance(f, mesonlib.File):
o = [f.fname]
else:
o = f.get_outputs()
conflicts = outputs.intersection(o)
if conflicts:
raise InvalidArguments.from_node(
f"Conflicting sources in structured sources: {', '.join(sorted(conflicts))}",
node=node)
outputs.update(o)
kwargs['include_directories'] = self.extract_incdirs(kwargs)
target = targetclass(name, self.subdir, self.subproject, for_machine, srcs, struct, objs, self.environment, kwargs)
target.project_version = self.project_version
self.add_stdlib_info(target)
self.add_target(name, target)
self.project_args_frozen = True
return target
def kwarg_strings_to_includedirs(self, kwargs):
if 'd_import_dirs' in kwargs:
items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')
cleaned_items = []
for i in items:
if isinstance(i, str):
# BW compatibility. This was permitted so we must support it
# for a few releases so people can transition to "correct"
# path declarations.
if os.path.normpath(i).startswith(self.environment.get_source_dir()):
mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
This will become a hard error in the future.''', location=self.current_node)
i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))
i = self.build_incdir_object([i])
cleaned_items.append(i)
kwargs['d_import_dirs'] = cleaned_items
def get_used_languages(self, target):
result = set()
for i in target.sources:
for lang, c in self.coredata.compilers[target.for_machine].items():
if c.can_compile(i):
result.add(lang)
break
return result
def add_stdlib_info(self, target):
for l in self.get_used_languages(target):
dep = self.build.stdlibs[target.for_machine].get(l, None)
if dep:
target.add_deps(dep)
def check_sources_exist(self, subdir, sources):
for s in sources:
if not isinstance(s, str):
continue # This means a generated source and they always exist.
fname = os.path.join(subdir, s)
if not os.path.isfile(fname):
raise InterpreterException(f'Tried to add non-existing source file {s}.')
# Only permit object extraction from the same subproject
def validate_extraction(self, buildtarget: mesonlib.HoldableObject) -> None:
if self.subproject != buildtarget.subproject:
raise InterpreterException('Tried to extract objects from a different subproject.')
def is_subproject(self) -> bool:
return self.subproject != ''
@typed_pos_args('set_variable', str, object)
@noKwargs
@noArgsFlattening
@noSecondLevelHolderResolving
def func_set_variable(self, node: mparser.BaseNode, args: T.Tuple[str, object], kwargs: 'TYPE_kwargs') -> None:
varname, value = args
self.set_variable(varname, value, holderify=True)
@typed_pos_args('get_variable', (str, Disabler), optargs=[object])
@noKwargs
@noArgsFlattening
@unholder_return
def func_get_variable(self, node: mparser.BaseNode, args: T.Tuple[T.Union[str, Disabler], T.Optional[object]],
kwargs: 'TYPE_kwargs') -> 'TYPE_var':
varname, fallback = args
if isinstance(varname, Disabler):
return varname
try:
return self.variables[varname]
except KeyError:
if fallback is not None:
return self._holderify(fallback)
raise InterpreterException(f'Tried to get unknown variable "{varname}".')
@typed_pos_args('is_variable', str)
@noKwargs
def func_is_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> bool:
return args[0] in self.variables
@FeatureNew('unset_variable', '0.60.0')
@typed_pos_args('unset_variable', str)
@noKwargs
def func_unset_variable(self, node: mparser.BaseNode, args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> None:
varname = args[0]
try:
del self.variables[varname]
except KeyError:
raise InterpreterException(f'Tried to unset unknown variable "{varname}".')
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
native = kwargs.get('native', False)
if not isinstance(native, bool):
raise InvalidArguments('Argument to "native" must be a boolean.')
return MachineChoice.BUILD if native else MachineChoice.HOST
@FeatureNew('is_disabler', '0.52.0')
@typed_pos_args('is_disabler', object)
@noKwargs
def func_is_disabler(self, node: mparser.BaseNode, args: T.Tuple[object], kwargs: 'TYPE_kwargs') -> bool:
return isinstance(args[0], Disabler)
@noKwargs
@FeatureNew('range', '0.58.0')
@typed_pos_args('range', int, optargs=[int, int])
def func_range(self, node, args: T.Tuple[int, T.Optional[int], T.Optional[int]], kwargs: T.Dict[str, T.Any]) -> P_OBJ.RangeHolder:
start, stop, step = args
# Just like Python's range, we allow range(stop), range(start, stop), or
# range(start, stop, step)
if stop is None:
stop = start
start = 0
if step is None:
step = 1
# This is more strict than Python's range()
if start < 0:
raise InterpreterException('start cannot be negative')
if stop < start:
raise InterpreterException('stop cannot be less than start')
if step < 1:
raise InterpreterException('step must be >=1')
return P_OBJ.RangeHolder(start, stop, step, subproject=self.subproject)
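    # For orientation, range() as interpreted above is used from meson.build like
    # this (a sketch; any loop body would do):
    #
    #     foreach i : range(0, 10, 2)
    #         message(i.to_string())
    #     endforeach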
| 49.734289 | 163 | 0.609897 | [
"Apache-2.0"
] | val-verde/python-meson | mesonbuild/interpreter/interpreter.py | 152,734 | Python |
import base64
from .fields import BaseField
class BaseTask(object):
def serialize(self, **result):
return result
class ProxyMixin(BaseTask):
def __init__(self, *args, **kwargs):
self.proxyType = kwargs.pop('proxy_type')
self.userAgent = kwargs.pop('user_agent')
self.proxyAddress = kwargs.pop('proxy_address')
self.proxyPort = kwargs.pop('proxy_port')
self.proxyLogin = kwargs.pop('proxy_login')
self.proxyPassword = kwargs.pop('proxy_password')
self.cookies = kwargs.pop('cookies', '')
super(ProxyMixin, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(ProxyMixin, self).serialize(**result)
result['userAgent'] = self.userAgent
result['proxyType'] = self.proxyType
result['proxyAddress'] = self.proxyAddress
result['proxyPort'] = self.proxyPort
if self.proxyLogin:
result['proxyLogin'] = self.proxyLogin
result['proxyPassword'] = self.proxyPassword
if self.cookies:
result['cookies'] = self.cookies
return result
class NoCaptchaTaskProxylessTask(BaseTask):
type = "NoCaptchaTaskProxyless"
websiteURL = None
websiteKey = None
websiteSToken = None
def __init__(self, website_url, website_key, website_s_token=None, is_invisible=None):
self.websiteURL = website_url
self.websiteKey = website_key
self.websiteSToken = website_s_token
self.isInvisible = is_invisible
def serialize(self):
data = {'type': self.type,
'websiteURL': self.websiteURL,
'websiteKey': self.websiteKey}
if self.websiteSToken is not None:
data['websiteSToken'] = self.websiteSToken
if self.isInvisible is not None:
data['isInvisible'] = self.isInvisible
return data
class FunCaptchaTask(ProxyMixin):
type = "FunCaptchaTask"
websiteURL = None
websiteKey = None
def __init__(self, website_url, website_key, *args, **kwargs):
self.websiteURL = website_url
self.websiteKey = website_key
super(FunCaptchaTask, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(FunCaptchaTask, self).serialize(**result)
result.update({'type': self.type,
'websiteURL': self.websiteURL,
'websitePublicKey': self.websiteKey})
return result
class NoCaptchaTask(ProxyMixin, NoCaptchaTaskProxylessTask):
type = "NoCaptchaTask"
class ImageToTextTask(object):
type = "ImageToTextTask"
fp = None
phrase = None
case = None
numeric = None
math = None
minLength = None
maxLength = None
def __init__(self, fp, phrase=None, case=None, numeric=None, math=None, min_length=None, max_length=None):
self.fp = fp
self.phrase = phrase
self.case = case
self.numeric = numeric
self.math = math
self.minLength = min_length
self.maxLength = max_length
def serialize(self):
return {'type': self.type,
'body': base64.b64encode(self.fp.read()).decode('utf-8'),
'phrase': self.phrase,
'case': self.case,
'numeric': self.numeric,
'math': self.math,
'minLength': self.minLength,
'maxLength': self.maxLength}
class CustomCaptchaTask(BaseTask):
type = 'CustomCaptchaTask'
imageUrl = None
assignment = None
form = None
def __init__(self, imageUrl, form=None, assignment=None):
self.imageUrl = imageUrl
self.form = form or {}
self.assignment = assignment
def serialize(self):
data = super(CustomCaptchaTask, self).serialize()
data.update({'type': self.type,
'imageUrl': self.imageUrl})
if self.form:
forms = []
for name, field in self.form.items():
if isinstance(field, BaseField):
forms.append(field.serialize(name))
else:
field = field.copy()
field['name'] = name
forms.append(field)
data['forms'] = forms
if self.assignment:
data['assignment'] = self.assignment
return data
class RecaptchaV3TaskProxyless(BaseTask):
type = 'RecaptchaV3TaskProxyless'
websiteURL = None
websiteKey = None
minScore = None
pageAction = None
def __init__(self, website_url, website_key, min_score, page_action):
self.websiteURL = website_url
self.websiteKey = website_key
self.minScore = min_score
self.pageAction = page_action
def serialize(self):
data = super(RecaptchaV3TaskProxyless, self).serialize()
data['type'] = self.type
data['websiteURL'] = self.websiteURL
data['websiteKey'] = self.websiteKey
data['minScore'] = self.minScore
data['pageAction'] = self.pageAction
return data
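# Usage sketch (illustrative only; ``AnticaptchaClient`` is provided elsewhere in
# this package, and the exact call sequence may vary between versions):
#
#     client = AnticaptchaClient(api_key)
#     task = NoCaptchaTaskProxylessTask(website_url=url, website_key=site_key)
#     job = client.createTask(task)
#     job.join()
#     token = job.get_solution_response()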
| 31.555556 | 110 | 0.607981 | [
"MIT"
] | uguraba/python-anticaptcha | python_anticaptcha/tasks.py | 5,112 | Python |
# -*- coding: utf-8 -*-
"""
.. _training-example:
Train Your Own Neural Network Potential
=======================================
This example shows how to use TorchANI to train a neural network potential
with a setup identical to NeuroChem's. We will use the same configuration as
specified in `inputtrain.ipt`_.
.. _`inputtrain.ipt`:
https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/inputtrain.ipt
.. note::
TorchANI provide tools to run NeuroChem training config file `inputtrain.ipt`.
See: :ref:`neurochem-training`.
"""
###############################################################################
# To begin with, let's first import the modules and setup devices we will use:
import torch
import torchani
import os
import math
import torch.utils.tensorboard
import tqdm
# helper function to convert energy unit from Hartree to kcal/mol
from torchani.units import hartree2kcalmol
# device to run the training
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
###############################################################################
# Now let's set up constants and construct an AEV computer. These numbers can
# be found in `rHCNO-5.2R_16-3.5A_a4-8.params`_.
# The atomic self energies given in `sae_linfit.dat`_ are computed from ANI-1x
# dataset. These constants can be calculated for any given dataset if ``None``
# is provided as an argument to the object of :class:`EnergyShifter` class.
#
# .. note::
#
# Besides defining these hyperparameters programmatically,
# :mod:`torchani.neurochem` provide tools to read them from file.
#
# .. _rHCNO-5.2R_16-3.5A_a4-8.params:
# https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/rHCNO-5.2R_16-3.5A_a4-8.params
# .. _sae_linfit.dat:
# https://github.com/aiqm/torchani/blob/master/torchani/resources/ani-1x_8x/sae_linfit.dat
Rcr = 5.2000e+00
Rca = 3.5000e+00
EtaR = torch.tensor([1.6000000e+01], device=device)
ShfR = torch.tensor([9.0000000e-01, 1.1687500e+00, 1.4375000e+00, 1.7062500e+00, 1.9750000e+00, 2.2437500e+00, 2.5125000e+00, 2.7812500e+00, 3.0500000e+00, 3.3187500e+00, 3.5875000e+00, 3.8562500e+00, 4.1250000e+00, 4.3937500e+00, 4.6625000e+00, 4.9312500e+00], device=device)
Zeta = torch.tensor([3.2000000e+01], device=device)
ShfZ = torch.tensor([1.9634954e-01, 5.8904862e-01, 9.8174770e-01, 1.3744468e+00, 1.7671459e+00, 2.1598449e+00, 2.5525440e+00, 2.9452431e+00], device=device)
EtaA = torch.tensor([8.0000000e+00], device=device)
ShfA = torch.tensor([9.0000000e-01, 1.5500000e+00, 2.2000000e+00, 2.8500000e+00], device=device)
num_species = 4
aev_computer = torchani.AEVComputer(Rcr, Rca, EtaR, ShfR, EtaA, Zeta, ShfA, ShfZ, num_species)
energy_shifter = torchani.utils.EnergyShifter(None)
species_to_tensor = torchani.utils.ChemicalSymbolsToInts(['H', 'C', 'N', 'O'])
###############################################################################
# Now let's set up datasets. These paths assume the user runs this script under
# the ``examples`` directory of TorchANI's repository. If you download this
# script, you should manually set the paths of these files on your system before
# this script can run successfully.
#
# Also note that we need to subtract the self energies of all atoms from the
# energy of each molecule. This keeps the energies in a reasonable range. The
# second argument defines how to convert species given as a list of strings to a
# tensor, that is, which supported chemical symbol corresponds to index ``0``,
# which corresponds to ``1``, etc.
try:
path = os.path.dirname(os.path.realpath(__file__))
except NameError:
path = os.getcwd()
dspath = os.path.join(path, '../dataset/ani1-up_to_gdb4/ani_gdb_s01.h5')
batch_size = 2560
training, validation = torchani.data.load(dspath).subtract_self_energies(energy_shifter).species_to_indices().shuffle().split(0.8, None)
training = training.collate(batch_size).cache()
validation = validation.collate(batch_size).cache()
print('Self atomic energies: ', energy_shifter.self_energies)
###############################################################################
# When iterating the dataset, we will get a dict of name->property mapping
#
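# A minimal sketch of what a batch looks like (keys taken from the training loop
# below; additional properties may be present depending on the dataset):
#
#     for properties in training:
#         species = properties['species']          # (molecules, atoms), int
#         coordinates = properties['coordinates']  # (molecules, atoms, 3)
#         energies = properties['energies']        # (molecules,)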
###############################################################################
# Now let's define atomic neural networks.
H_network = torch.nn.Sequential(
torch.nn.Linear(384, 160),
torch.nn.CELU(0.1),
torch.nn.Linear(160, 128),
torch.nn.CELU(0.1),
torch.nn.Linear(128, 96),
torch.nn.CELU(0.1),
torch.nn.Linear(96, 1)
)
C_network = torch.nn.Sequential(
torch.nn.Linear(384, 144),
torch.nn.CELU(0.1),
torch.nn.Linear(144, 112),
torch.nn.CELU(0.1),
torch.nn.Linear(112, 96),
torch.nn.CELU(0.1),
torch.nn.Linear(96, 1)
)
N_network = torch.nn.Sequential(
torch.nn.Linear(384, 128),
torch.nn.CELU(0.1),
torch.nn.Linear(128, 112),
torch.nn.CELU(0.1),
torch.nn.Linear(112, 96),
torch.nn.CELU(0.1),
torch.nn.Linear(96, 1)
)
O_network = torch.nn.Sequential(
torch.nn.Linear(384, 128),
torch.nn.CELU(0.1),
torch.nn.Linear(128, 112),
torch.nn.CELU(0.1),
torch.nn.Linear(112, 96),
torch.nn.CELU(0.1),
torch.nn.Linear(96, 1)
)
nn = torchani.ANIModel([H_network, C_network, N_network, O_network])
print(nn)
###############################################################################
# Initialize the weights and biases.
#
# .. note::
# Pytorch default initialization for the weights and biases in linear layers
# is Kaiming uniform. See: `TORCH.NN.MODULES.LINEAR`_
# We initialize the weights similarly but from the normal distribution.
# The biases were initialized to zero.
#
# .. _TORCH.NN.MODULES.LINEAR:
# https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
def init_params(m):
if isinstance(m, torch.nn.Linear):
torch.nn.init.kaiming_normal_(m.weight, a=1.0)
torch.nn.init.zeros_(m.bias)
nn.apply(init_params)
###############################################################################
# Let's now create a pipeline of AEV Computer --> Neural Networks.
model = torchani.nn.Sequential(aev_computer, nn).to(device)
###############################################################################
# Now let's set up the optimizers. NeuroChem uses Adam with decoupled weight decay
# to update the weights and Stochastic Gradient Descent (SGD) to update the biases.
# Moreover, we need to specify different weight decay rates for different layers.
#
# .. note::
#
# The weight decay in `inputtrain.ipt`_ is named "l2", but it is actually not
# L2 regularization. The confusion between L2 and weight decay is a common
# mistake in deep learning. See: `Decoupled Weight Decay Regularization`_
# Also note that the weight decay only applies to weight in the training
# of ANI models, not bias.
#
# .. _Decoupled Weight Decay Regularization:
# https://arxiv.org/abs/1711.05101
AdamW = torchani.optim.AdamW([
# H networks
{'params': [H_network[0].weight]},
{'params': [H_network[2].weight], 'weight_decay': 0.00001},
{'params': [H_network[4].weight], 'weight_decay': 0.000001},
{'params': [H_network[6].weight]},
# C networks
{'params': [C_network[0].weight]},
{'params': [C_network[2].weight], 'weight_decay': 0.00001},
{'params': [C_network[4].weight], 'weight_decay': 0.000001},
{'params': [C_network[6].weight]},
# N networks
{'params': [N_network[0].weight]},
{'params': [N_network[2].weight], 'weight_decay': 0.00001},
{'params': [N_network[4].weight], 'weight_decay': 0.000001},
{'params': [N_network[6].weight]},
# O networks
{'params': [O_network[0].weight]},
{'params': [O_network[2].weight], 'weight_decay': 0.00001},
{'params': [O_network[4].weight], 'weight_decay': 0.000001},
{'params': [O_network[6].weight]},
])
SGD = torch.optim.SGD([
# H networks
{'params': [H_network[0].bias]},
{'params': [H_network[2].bias]},
{'params': [H_network[4].bias]},
{'params': [H_network[6].bias]},
# C networks
{'params': [C_network[0].bias]},
{'params': [C_network[2].bias]},
{'params': [C_network[4].bias]},
{'params': [C_network[6].bias]},
# N networks
{'params': [N_network[0].bias]},
{'params': [N_network[2].bias]},
{'params': [N_network[4].bias]},
{'params': [N_network[6].bias]},
# O networks
{'params': [O_network[0].bias]},
{'params': [O_network[2].bias]},
{'params': [O_network[4].bias]},
{'params': [O_network[6].bias]},
], lr=1e-3)
###############################################################################
# Setting up a learning rate scheduler to do learning rate decay
AdamW_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(AdamW, factor=0.5, patience=100, threshold=0)
SGD_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(SGD, factor=0.5, patience=100, threshold=0)
###############################################################################
# Train the model by minimizing the MSE loss. When the validation RMSE no longer
# improves for a certain number of steps, decay the learning rate and repeat the
# same process; stop once the learning rate is smaller than a threshold.
#
# We first read the checkpoint files to restart training. We use `latest.pt`
# to store current training state.
latest_checkpoint = 'latest.pt'
###############################################################################
# Resume training from previously saved checkpoints:
if os.path.isfile(latest_checkpoint):
checkpoint = torch.load(latest_checkpoint)
nn.load_state_dict(checkpoint['nn'])
AdamW.load_state_dict(checkpoint['AdamW'])
SGD.load_state_dict(checkpoint['SGD'])
AdamW_scheduler.load_state_dict(checkpoint['AdamW_scheduler'])
SGD_scheduler.load_state_dict(checkpoint['SGD_scheduler'])
###############################################################################
# During training, we need to validate on the validation set, and if the
# validation error is better than the best so far, save the new best model to a
# checkpoint
def validate():
# run validation
mse_sum = torch.nn.MSELoss(reduction='sum')
total_mse = 0.0
count = 0
for properties in validation:
species = properties['species'].to(device)
coordinates = properties['coordinates'].to(device).float()
true_energies = properties['energies'].to(device).float()
_, predicted_energies = model((species, coordinates))
total_mse += mse_sum(predicted_energies, true_energies).item()
count += predicted_energies.shape[0]
return hartree2kcalmol(math.sqrt(total_mse / count))
###############################################################################
# We will also use TensorBoard to visualize our training process
tensorboard = torch.utils.tensorboard.SummaryWriter()
###############################################################################
# Finally, we come to the training loop.
#
# In this tutorial, we are setting the maximum number of epochs to a very small
# value, only to make this demo terminate fast. For serious training, this
# should be set to a much larger value
mse = torch.nn.MSELoss(reduction='none')
print("training starting from epoch", AdamW_scheduler.last_epoch + 1)
max_epochs = 10
early_stopping_learning_rate = 1.0E-5
best_model_checkpoint = 'best.pt'
for _ in range(AdamW_scheduler.last_epoch + 1, max_epochs):
rmse = validate()
print('RMSE:', rmse, 'at epoch', AdamW_scheduler.last_epoch + 1)
learning_rate = AdamW.param_groups[0]['lr']
if learning_rate < early_stopping_learning_rate:
break
# checkpoint
if AdamW_scheduler.is_better(rmse, AdamW_scheduler.best):
torch.save(nn.state_dict(), best_model_checkpoint)
AdamW_scheduler.step(rmse)
SGD_scheduler.step(rmse)
tensorboard.add_scalar('validation_rmse', rmse, AdamW_scheduler.last_epoch)
tensorboard.add_scalar('best_validation_rmse', AdamW_scheduler.best, AdamW_scheduler.last_epoch)
tensorboard.add_scalar('learning_rate', learning_rate, AdamW_scheduler.last_epoch)
for i, properties in tqdm.tqdm(
enumerate(training),
total=len(training),
desc="epoch {}".format(AdamW_scheduler.last_epoch)
):
species = properties['species'].to(device)
coordinates = properties['coordinates'].to(device).float()
true_energies = properties['energies'].to(device).float()
num_atoms = (species >= 0).sum(dim=1, dtype=true_energies.dtype)
_, predicted_energies = model((species, coordinates))
loss = (mse(predicted_energies, true_energies) / num_atoms.sqrt()).mean()
AdamW.zero_grad()
SGD.zero_grad()
loss.backward()
AdamW.step()
SGD.step()
# write current batch loss to TensorBoard
tensorboard.add_scalar('batch_loss', loss, AdamW_scheduler.last_epoch * len(training) + i)
torch.save({
'nn': nn.state_dict(),
'AdamW': AdamW.state_dict(),
'SGD': SGD.state_dict(),
'AdamW_scheduler': AdamW_scheduler.state_dict(),
'SGD_scheduler': SGD_scheduler.state_dict(),
}, latest_checkpoint)
| 39.366071 | 276 | 0.642927 | [
"MIT"
] | isayev/torchani | examples/nnp_training.py | 13,227 | Python |
import pytest
import uqbar.strings
import supriya.patterns
pattern_01 = supriya.patterns.Pn(
supriya.patterns.Pbind(foo=supriya.patterns.Pseq(["A", "B", "C"])), repetitions=2
)
pattern_02 = supriya.patterns.Pn(
supriya.patterns.Pbind(foo=supriya.patterns.Pseq(["A", "B", "C"])),
key="repeat",
repetitions=3,
)
def test___iter___01():
events = list(pattern_01)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
foo='A',
uuid=UUID('A'),
)
NoteEvent(
foo='B',
uuid=UUID('B'),
)
NoteEvent(
foo='C',
uuid=UUID('C'),
)
NoteEvent(
foo='A',
uuid=UUID('D'),
)
NoteEvent(
foo='B',
uuid=UUID('E'),
)
NoteEvent(
foo='C',
uuid=UUID('F'),
)
"""
)
def test___iter___02():
events = list(pattern_02)
assert pytest.helpers.get_objects_as_string(
events, replace_uuids=True
) == uqbar.strings.normalize(
"""
NoteEvent(
foo='A',
repeat=True,
uuid=UUID('A'),
)
NoteEvent(
foo='B',
uuid=UUID('B'),
)
NoteEvent(
foo='C',
uuid=UUID('C'),
)
NoteEvent(
foo='A',
repeat=True,
uuid=UUID('D'),
)
NoteEvent(
foo='B',
uuid=UUID('E'),
)
NoteEvent(
foo='C',
uuid=UUID('F'),
)
NoteEvent(
foo='A',
repeat=True,
uuid=UUID('G'),
)
NoteEvent(
foo='B',
uuid=UUID('H'),
)
NoteEvent(
foo='C',
uuid=UUID('I'),
)
"""
)
| 20.030303 | 85 | 0.420575 | [
"MIT"
] | butayama/supriya | tests/patterns/test_patterns_Pn.py | 1,983 | Python |
""" A QuoteController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.Quote import Quote
class QuoteController(Controller):
def __init__(self, request: Request):
self.request = request
def show(self):
id = self.request.param("id")
return Quote.find(id)
def index(self):
return Quote.all()
def create(self):
subject = self.request.input("subject")
quote = Quote.create({"subject": subject})
return quote
def update(self):
subject = self.request.input("subject")
id = self.request.param("id")
Quote.where("id", id).update({"subject": subject})
return Quote.where("id", id).get()
def destroy(self):
id = self.request.param("id")
quote = Quote.where("id", id).get()
Quote.where("id", id).delete()
return quote
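# Illustrative wiring sketch (assuming Masonite's Get/Post/Put/Delete route
# helpers and its @id route-parameter syntax) showing how these controller
# methods could be exposed in a routes file:
#
#   from masonite.routes import Get, Post, Put, Delete
#   ROUTES = [
#       Get("/quotes", "QuoteController@index"),
#       Get("/quote/@id", "QuoteController@show"),
#       Post("/quote", "QuoteController@create"),
#       Put("/quote/@id", "QuoteController@update"),
#       Delete("/quote/@id", "QuoteController@destroy"),
#   ]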
| 26.764706 | 58 | 0.613187 | [
"MIT"
] | code-weather/capstone_backend_tailwindCSS | app/http/controllers/QuoteController.py | 910 | Python |
class Errors:
def __init__(self):
pass
def min_nonetype(self):
pass
| 13.25 | 27 | 0.5 | [
"MIT"
] | bailez/matfin | matfin/utils/utils.py | 106 | Python |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from .chromatic import *
| 26.5 | 57 | 0.641509 | [
"MIT"
] | Jyvol/enterprise_extensions | enterprise_extensions/chromatic/__init__.py | 159 | Python |
"""
Component to interface with various locks that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/lock/
"""
import asyncio
from datetime import timedelta
import functools as ft
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, STATE_LOCKED, STATE_UNLOCKED,
STATE_UNKNOWN, SERVICE_LOCK, SERVICE_UNLOCK)
from homeassistant.components import group
DOMAIN = 'lock'
SCAN_INTERVAL = timedelta(seconds=30)
ATTR_CHANGED_BY = 'changed_by'
GROUP_NAME_ALL_LOCKS = 'all locks'
ENTITY_ID_ALL_LOCKS = group.ENTITY_ID_FORMAT.format('all_locks')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
LOCK_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_CODE): cv.string,
})
_LOGGER = logging.getLogger(__name__)
def is_locked(hass, entity_id=None):
"""Return if the lock is locked based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LOCKS
return hass.states.is_state(entity_id, STATE_LOCKED)
def lock(hass, entity_id=None, code=None):
"""Lock all or specified locks."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_LOCK, data)
def unlock(hass, entity_id=None, code=None):
"""Unlock all or specified locks."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_UNLOCK, data)
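# Illustrative usage of the helpers above (assumes a running `hass` instance
# and a configured entity such as 'lock.front_door'):
#
#   lock(hass, entity_id='lock.front_door', code='1234')
#   unlock(hass, entity_id='lock.front_door', code='1234')
#   is_locked(hass, 'lock.front_door')  # -> True or False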
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for locks."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
yield from component.async_setup(config)
@asyncio.coroutine
def async_handle_lock_service(service):
"""Handle calls to the lock services."""
target_locks = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
for entity in target_locks:
if service.service == SERVICE_LOCK:
yield from entity.async_lock(code=code)
else:
yield from entity.async_unlock(code=code)
update_tasks = []
for entity in target_locks:
if not entity.should_poll:
continue
update_coro = hass.loop.create_task(
entity.async_update_ha_state(True))
if hasattr(entity, 'async_update'):
update_tasks.append(update_coro)
else:
yield from update_coro
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
hass.services.async_register(
DOMAIN, SERVICE_UNLOCK, async_handle_lock_service,
descriptions.get(SERVICE_UNLOCK), schema=LOCK_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_LOCK, async_handle_lock_service,
descriptions.get(SERVICE_LOCK), schema=LOCK_SERVICE_SCHEMA)
return True
class LockDevice(Entity):
"""Representation of a lock."""
@property
def changed_by(self):
"""Last change triggered by."""
return None
# pylint: disable=no-self-use
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
@property
def is_locked(self):
"""Return true if the lock is locked."""
return None
def lock(self, **kwargs):
"""Lock the lock."""
raise NotImplementedError()
def async_lock(self, **kwargs):
"""Lock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, ft.partial(self.lock, **kwargs))
def unlock(self, **kwargs):
"""Unlock the lock."""
raise NotImplementedError()
def async_unlock(self, **kwargs):
"""Unlock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, ft.partial(self.unlock, **kwargs))
@property
def state_attributes(self):
"""Return the state attributes."""
if self.code_format is None:
return None
state_attr = {
ATTR_CODE_FORMAT: self.code_format,
ATTR_CHANGED_BY: self.changed_by
}
return state_attr
@property
def state(self):
"""Return the state."""
locked = self.is_locked
if locked is None:
return STATE_UNKNOWN
return STATE_LOCKED if locked else STATE_UNLOCKED
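# A minimal sketch of what a platform-specific subclass of LockDevice has to
# provide (the class name and behaviour below are illustrative only):
#
#   class MyLock(LockDevice):
#       def __init__(self):
#           self._locked = True
#       @property
#       def is_locked(self):
#           return self._locked
#       def lock(self, **kwargs):
#           self._locked = True
#       def unlock(self, **kwargs):
#           self._locked = False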
| 28.945652 | 78 | 0.672174 | [
"Apache-2.0"
] | Norien/Home-Assistant | homeassistant/components/lock/__init__.py | 5,326 | Python |
"""
Created on Sun Feb 2 13:28:48 2020
@author: matias
"""
import numpy as np
from numpy.linalg import inv
from matplotlib import pyplot as plt
import time
import camb
from scipy.integrate import cumtrapz as cumtrapz
from scipy.integrate import simps as simps
from scipy.interpolate import interp1d
from scipy.constants import c as c_luz # meters/seconds
c_luz_km = c_luz/1000
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_int import Hubble_teorico
from funciones_AGN import zs_2_logDlH0
#%%
'''
DEPRECATED: Before deleting this file, copy this example into another .py
where the data are plotted.
'''
if __name__ == '__main__':
    from scipy.constants import c as c_luz # meters/seconds
from matplotlib import pyplot as plt
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_data import leer_data_AGN
#%%
def leer_data_AGN(archivo_AGN):
z, Fuv, eFuv, Fx, eFx = np.loadtxt(archivo_AGN,
usecols=(3,4,5,6,7), unpack=True)
arr1inds = z.argsort()
sorted_z = z[arr1inds]
sorted_Fuv = Fuv[arr1inds]
sorted_eFuv = eFuv[arr1inds]
sorted_Fx = Fx[arr1inds]
sorted_eFx = eFx[arr1inds]
return sorted_z, sorted_Fuv, sorted_eFuv, sorted_Fx, sorted_eFx
#Data AGN
os.chdir(path_git+'/Software/Estadística/Datos/Datos_AGN')
data_agn = leer_data_AGN('table3.dat')
H_0 = 70
omega_m = 0.99
gamma = 0.64
beta = 7
delta = 0.3
theta = [omega_m,beta,gamma,delta]
#params_to_chi2_AGN_nuisance(theta, _, data_agn)/(len(z_data)-4)
data_agn = leer_data_AGN('table3.dat')
z_data_1, logFuv_1, eFuv_1, logFx_1, eFx_1 = data_agn
zmin = 0
zmax = 100
mask = (z_data_1 > zmin) & (z_data_1 < zmax)
z_data = z_data_1[mask]
logFuv = logFuv_1[mask]
logFx = logFx_1[mask]
eFx = eFx_1[mask]
eFuv = eFuv_1[mask]
zs_modelo = np.linspace(0,30,10**6)
Dl_teo = -np.log10(H_0) + zs_2_logDlH0(zs_modelo,omega_m,z_data)
Dl_teo_cm = Dl_teo - np.log10(3.24) + 25
psi = beta + gamma * logFuv + 2 * (gamma-1) * (Dl_teo_cm + 0.5 * np.log10(4*np.pi))
    si_2 = eFx**2 + (gamma * eFuv)**2 + np.exp(2*np.log(delta)) # the squared errors
    #si_2 = eFx**2 + (gamma * eFuv)**2 + delta**2 # the squared errors
print(np.sum(si_2))
    chi2_AGN = np.sum( ((logFx-psi)**2/si_2) + np.log(2*np.pi*si_2)) # the paper has a minus sign here
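    # The sum above is the Gaussian -2*log-likelihood (up to an additive
    # constant): chi2 = sum_i [ (logFx_i - psi_i)^2 / si_2_i + ln(2*pi*si_2_i) ]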
print(chi2_AGN)
print(chi2_AGN/(len(z_data)-4))
plt.figure()
plt.xlabel('z (redshift)')
plt.ylabel(r'$Fx$')
plt.errorbar(z_data,psi,np.sqrt(si_2),marker='.',linestyle='')
plt.plot(z_data,logFx,'.r')
| 28.557692 | 91 | 0.676431 | [
"MIT"
] | matiasleize/tesis_licenciatura | Software/Funcionales/funciones_LambdaCDM_AGN.py | 2,971 | Python |
# mypy: allow-untyped-defs
import subprocess
from functools import partial
from typing import Callable
from mozlog import get_default_logger
from wptserve.utils import isomorphic_decode
logger = None
def vcs(bin_name: str) -> Callable[..., None]:
def inner(command, *args, **kwargs):
global logger
if logger is None:
logger = get_default_logger("vcs")
repo = kwargs.pop("repo", None)
log_error = kwargs.pop("log_error", True)
stdout = kwargs.pop("stdout", None)
stdin = kwargs.pop("stdin", None)
if kwargs:
raise TypeError(kwargs)
args = list(args)
proc_kwargs = {}
if repo is not None:
# Make sure `cwd` is str type to work in different sub-versions of Python 3.
# Before 3.8, bytes were not accepted on Windows for `cwd`.
proc_kwargs["cwd"] = isomorphic_decode(repo)
if stdout is not None:
proc_kwargs["stdout"] = stdout
if stdin is not None:
proc_kwargs["stdin"] = stdin
command_line = [bin_name, command] + args
logger.debug(" ".join(command_line))
try:
func = subprocess.check_output if not stdout else subprocess.check_call
return func(command_line, stderr=subprocess.STDOUT, **proc_kwargs)
except OSError as e:
if log_error:
logger.error(e)
raise
except subprocess.CalledProcessError as e:
if log_error:
logger.error(e.output)
raise
return inner
git = vcs("git")
hg = vcs("hg")
def bind_to_repo(vcs_func, repo, log_error=True):
return partial(vcs_func, repo=repo, log_error=log_error)
def is_git_root(path, log_error=True):
try:
rv = git("rev-parse", "--show-cdup", repo=path, log_error=log_error)
except subprocess.CalledProcessError:
return False
return rv == b"\n"
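# Illustrative usage of the helpers above (the checkout path is a placeholder):
#
#   repo_git = bind_to_repo(git, "/path/to/checkout")
#   repo_git("status", "--porcelain")  # same as git("status", "--porcelain", repo="/path/to/checkout")
#   is_git_root("/path/to/checkout")   # True only when pointed at the repository root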
| 28.735294 | 88 | 0.61259 | [
"BSD-3-Clause"
] | BasixKOR/wpt | tools/wptrunner/wptrunner/vcs.py | 1,954 | Python |
import socket
import struct
import os
PACKET_SIZE = 1024
TIME_OUT = 5
SUCCESS = b'File Has Been Transferred'
def getPayload(fileName):
try:
with open(file=fileName, mode="r+b") as readFile:
payload = readFile.read()
if len(payload) == 0:
print("That is a blank file.\nProgram now exiting ...")
exit()
return payload
except FileNotFoundError:
print("\nNo payload file.\nProgram now exiting ...")
exit()
def main():
# fileName = "test.txt"
# serverIP = "127.0.0.1"
# serverPort = 5005
fileName = input("Enter path of the file to be sent to the server:\n")
payload = getPayload(fileName=fileName)
print("File Found ...")
serverIP = input("\nEnter the IP Address of the server:\n")
    if not serverIP:
print("Cannot leave server IP address blank.\nProgram now exiting ...")
exit()
try:
serverPort = int(input("\nEnter the Port of the server:\n"))
    except ValueError:
        print("Please provide a valid port number. It should only contain characters 0-9.\nProgram now exiting ...")
exit()
if serverPort is None:
print("Cannot leave server port blank.\nProgram now exiting ...")
exit()
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(TIME_OUT)
sock.connect((serverIP, serverPort))
print("\nTransferring File ...")
name = fileName.split("/")[-1]
nameBytes = name.encode("utf-8")
nameLength = len(nameBytes)
nameSizeBytes = struct.pack("I", nameLength)
payloadLength = len(payload) + 8 + nameLength
numPackets = (payloadLength // PACKET_SIZE)
if (payloadLength / PACKET_SIZE) > numPackets:
numPackets += 1
packedNumBytes = struct.pack('I', numPackets)
header = packedNumBytes + nameSizeBytes + nameBytes
payload = header + payload
sock.sendall(payload)
data = sock.recv(PACKET_SIZE)
print("\nStatus:")
print(data.decode("utf-8"))
print("\nProgram done ...")
    except (ConnectionRefusedError, ConnectionResetError) as e:
print(f"\n{e} Error Occurred. Check for correct server IP Address and Ports. Check server status.\nProgram now exiting ...")
except Exception as e:
print(f"\n{e} error has broken things.")
if __name__ == '__main__':
os.system("clear")
main()
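# Illustrative sketch of how a receiving server could unpack the header
# assembled in main(): 4 bytes of packet count, 4 bytes of file-name length,
# then the file name followed by the file bytes (function name is hypothetical):
#
#   def parseHeader(data):
#       numPackets, nameLength = struct.unpack("II", data[:8])
#       fileName = data[8:8 + nameLength].decode("utf-8")
#       fileBytes = data[8 + nameLength:]
#       return numPackets, fileName, fileBytes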
| 28.413043 | 132 | 0.596787 | [
"MIT"
] | brendo61-byte/ECE_456 | Labs/Lab_4/Lab_4/client.py | 2,614 | Python |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import re
from sqlalchemy.orm import validates
from ggrc import db
from ggrc import settings
from ggrc.models.computed_property import computed_property
from ggrc.models.context import HasOwnContext
from ggrc.models.exceptions import ValidationError
from ggrc.models.mixins import deferred, Base, CustomAttributable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.utils import validate_option
class Person(CustomAttributable, HasOwnContext, Relatable, Base, db.Model):
__tablename__ = 'people'
email = deferred(db.Column(db.String, nullable=False), 'Person')
name = deferred(db.Column(db.String), 'Person')
language_id = deferred(db.Column(db.Integer), 'Person')
company = deferred(db.Column(db.String), 'Person')
object_people = db.relationship(
'ObjectPerson', backref='person', cascade='all, delete-orphan')
object_owners = db.relationship(
'ObjectOwner', backref='person', cascade='all, delete-orphan')
language = db.relationship(
'Option',
primaryjoin='and_(foreign(Person.language_id) == Option.id, '
'Option.role == "person_language")',
uselist=False,
)
@staticmethod
def _extra_table_args(cls):
return (
db.Index('ix_people_name_email', 'name', 'email'),
db.Index('uq_people_email', 'email', unique=True),
)
_fulltext_attrs = [
'company',
'email',
'name',
]
_publish_attrs = [
'company',
'email',
'language',
'name',
PublishOnly('object_people'),
PublishOnly('system_wide_role'),
]
_sanitize_html = [
'company',
'name',
]
_include_links = []
_aliases = {
"name": "Name",
"email": {
"display_name": "Email",
"unique": True,
},
"company": "Company",
"user_role": {
"display_name": "Role",
"type": "user_role",
"filter_by": "_filter_by_user_role",
},
}
@classmethod
def _filter_by_user_role(cls, predicate):
from ggrc_basic_permissions.models import Role, UserRole
return UserRole.query.join(Role).filter(
(UserRole.person_id == cls.id) &
(UserRole.context_id == None) &
predicate(Role.name)
).exists()
# Methods required by Flask-Login
def is_authenticated(self):
return True
def is_active(self):
return True # self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id) # noqa
@validates('language')
def validate_person_options(self, key, option):
return validate_option(self.__class__.__name__, key, option,
'person_language')
@validates('email')
def validate_email(self, key, email):
if not Person.is_valid_email(email):
message = "Must provide a valid email address"
raise ValidationError(message)
return email
@staticmethod
def is_valid_email(val):
# Borrowed from Django
# literal form, ipv4 address (SMTP 4.1.3)
email_re = re.compile(
'^[-!#$%&\'*+\\.\/0-9=?A-Z^_`{|}~]+@([-0-9A-Z]+\.)+([0-9A-Z]){2,4}$',
re.IGNORECASE)
return email_re.match(val) if val else False
@classmethod
def eager_query(cls):
from sqlalchemy import orm
# query = super(Person, cls).eager_query()
# Completely overriding eager_query to avoid eager loading of the
# modified_by relationship
return super(Person, cls).eager_query().options(
orm.joinedload('language'),
orm.subqueryload('object_people'),
)
def _display_name(self):
return self.email
@computed_property
def system_wide_role(self):
"""For choosing the role string to show to the user; of all the roles in
the system-wide context, it shows the highest ranked one (if there are
multiple) or "No Access" if there are none.
"""
# FIXME: This method should be in `ggrc_basic_permissions`, since it
# depends on `Role` and `UserRole` objects
if self.email in getattr(settings, "BOOTSTRAP_ADMIN_USERS", []):
return u"Superuser"
ROLE_HIERARCHY = {
u'gGRC Admin': 0,
u'Editor': 1,
u'Reader': 2,
u'Creator': 3,
}
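    # For example, a person holding both u'Reader' and u'Editor' system-wide
    # roles is shown u'Editor', since the lower rank above wins.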
system_wide_roles = ROLE_HIERARCHY.keys()
unique_roles = set([
user_role.role.name
for user_role in self.user_roles
if user_role.role.name in system_wide_roles
])
if len(unique_roles) == 0:
return u"No Access"
else:
# -1 as default to make items not in this list appear on top
# and thus shown to the user
sorted_roles = sorted(unique_roles,
key=lambda x: ROLE_HIERARCHY.get(x, -1))
return sorted_roles[0]
| 29.360947 | 78 | 0.654373 | [
"ECL-2.0",
"Apache-2.0"
] | mikecb/ggrc-core | src/ggrc/models/person.py | 4,962 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training functions for Gradient boosted decision trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from tensorflow.contrib import learn
from tensorflow.contrib import stateless
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import device_setter
# Key names for prediction dict.
ENSEMBLE_STAMP = "ensemble_stamp"
PREDICTIONS = "predictions"
PARTITION_IDS = "partition_ids"
NUM_LAYERS_ATTEMPTED = "num_layers"
NUM_TREES_ATTEMPTED = "num_trees"
NUM_USED_HANDLERS = "num_used_handlers"
USED_HANDLERS_MASK = "used_handlers_mask"
LEAF_INDEX = "leaf_index"
_FEATURE_NAME_TEMPLATE = "%s_%d"
# Keys in Training state.
GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [
"num_layer_examples", "num_layer_steps", "num_layers", "active_tree",
"active_layer", "continue_centering", "bias_stats_accumulator",
"steps_accumulator", "handlers"
])
def _get_column_by_index(tensor, indices):
"""Returns columns from a 2-D tensor by index."""
shape = array_ops.shape(tensor)
p_flat = array_ops.reshape(tensor, [-1])
i_flat = array_ops.reshape(
array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) +
indices, [-1])
return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1])
def _make_predictions_dict(stamp,
logits,
partition_ids,
ensemble_stats,
used_handlers,
leaf_index=None):
"""Returns predictions for the given logits and n_classes.
Args:
stamp: The ensemble stamp.
    logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1] that
contains predictions when no dropout was applied.
partition_ids: A rank 1 `Tensor` with shape [batch_size].
ensemble_stats: A TreeEnsembleStatsOp result tuple.
used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
boolean mask.
    leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees] that
      contains the leaf id for each example prediction.
Returns:
A dict of predictions.
"""
result = {}
result[ENSEMBLE_STAMP] = stamp
result[PREDICTIONS] = logits
result[PARTITION_IDS] = partition_ids
result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers
result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees
result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers
result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask
if leaf_index is not None:
result[LEAF_INDEX] = leaf_index
return result
class _OpRoundRobinStrategy(object):
"""Returns the next ps task index for placement via per-Op round-robin order.
This strategy works slightly better for the GBDT graph because of using
custom resources which vary significantly in compute cost.
"""
def __init__(self, ps_ops, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
"""
next_task = 0
self._next_task_per_op = {}
for op in ps_ops:
self._next_task_per_op[op] = next_task
next_task = (next_task + 1) % num_tasks if num_tasks else 0
self._num_tasks = num_tasks
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Returns the next
index, in the range `[offset, offset + num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
"""
if op.type not in self._next_task_per_op:
raise ValueError("Unknown op type '%s' for placement:" % op.type)
task = self._next_task_per_op[op.type]
self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks
if self._num_tasks else 0)
return task
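# Illustrative behaviour of the strategy above: with ps_ops=("Op1", "Op2") and
# num_tasks=2, ops of type "Op1" are placed on tasks 0, 1, 0, 1, ... while ops
# of type "Op2" are placed on tasks 1, 0, 1, 0, ...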
def extract_features(features, feature_columns, use_core_columns):
"""Extracts columns from a dictionary of features.
Args:
features: `dict` of `Tensor` objects.
    feature_columns: A list of feature_columns.
    use_core_columns: A boolean specifying whether core feature columns are
      used.
Returns:
Seven values:
- A list of all feature column names.
- A list of dense floats.
- A list of sparse float feature indices.
- A list of sparse float feature values.
- A list of sparse float feature shapes.
- A list of sparse int feature indices.
- A list of sparse int feature values.
- A list of sparse int feature shapes.
Raises:
ValueError: if features is not valid.
"""
if not features:
raise ValueError("Features dictionary must be specified.")
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
features = copy.copy(features)
if feature_columns:
scope = "gbdt"
with variable_scope.variable_scope(scope):
feature_columns = list(feature_columns)
transformed_features = collections.OrderedDict()
for fc in feature_columns:
# pylint: disable=protected-access
if use_core_columns:
# pylint: disable=protected-access
tensor = fc_core._transform_features(features, [fc])[fc]
transformed_features[fc.name] = tensor
elif isinstance(fc, feature_column_lib._EmbeddingColumn):
# pylint: enable=protected-access
transformed_features[fc.name] = fc_core.input_layer(
features, [fc], weight_collections=[scope])
else:
result = feature_column_ops.transform_features(features, [fc])
if len(result) > 1:
raise ValueError("Unexpected number of output features")
transformed_features[fc.name] = result[list(result.keys())[0]]
features = transformed_features
dense_float_names = []
dense_floats = []
sparse_float_names = []
sparse_float_indices = []
sparse_float_values = []
sparse_float_shapes = []
sparse_int_names = []
sparse_int_indices = []
sparse_int_values = []
sparse_int_shapes = []
for key in sorted(features.keys()):
tensor = features[key]
# TODO(nponomareva): consider iterating over feature columns instead.
if isinstance(tensor, tuple):
# Weighted categorical feature.
categorical_tensor = tensor[0]
weight_tensor = tensor[1]
shape = categorical_tensor.dense_shape
indices = array_ops.concat([
array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),
array_ops.expand_dims(
math_ops.to_int64(categorical_tensor.values), -1)
], 1)
tensor = sparse_tensor.SparseTensor(
indices=indices, values=weight_tensor.values, dense_shape=shape)
if isinstance(tensor, sparse_tensor.SparseTensor):
if tensor.values.dtype == dtypes.float32:
sparse_float_names.append(key)
sparse_float_indices.append(tensor.indices)
sparse_float_values.append(tensor.values)
sparse_float_shapes.append(tensor.dense_shape)
elif tensor.values.dtype == dtypes.int64:
sparse_int_names.append(key)
sparse_int_indices.append(tensor.indices)
sparse_int_values.append(tensor.values)
sparse_int_shapes.append(tensor.dense_shape)
else:
raise ValueError("Unsupported sparse feature %s with dtype %s." %
(tensor.indices.name, tensor.dtype))
else:
if tensor.dtype == dtypes.float32:
if len(tensor.shape) > 1 and tensor.shape[1] > 1:
unstacked = array_ops.unstack(tensor, axis=1)
for i in range(len(unstacked)):
dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))
dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))
else:
dense_float_names.append(key)
dense_floats.append(tensor)
else:
raise ValueError("Unsupported dense feature %s with dtype %s." %
(tensor.name, tensor.dtype))
# Feature columns are logically organized into incrementing slots starting
# from dense floats, then sparse floats then sparse ints.
fc_names = (dense_float_names + sparse_float_names + sparse_int_names)
return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes)
def _dropout_params(mode, ensemble_stats):
"""Returns parameters relevant for dropout.
Args:
mode: Train/Eval/Infer
ensemble_stats: A TreeEnsembleStatsOp result tuple.
Returns:
Whether to apply dropout and a dropout seed.
"""
if mode == learn.ModeKeys.TRAIN:
# Do dropout only during training.
apply_dropout = True
seed = ensemble_stats.attempted_trees
else:
seed = -1
apply_dropout = False
return apply_dropout, seed
class GradientBoostedDecisionTreeModel(object):
"""A GBDT model function."""
def __init__(self,
is_chief,
num_ps_replicas,
ensemble_handle,
center_bias,
examples_per_layer,
learner_config,
features,
logits_dimension,
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
feature_columns=None,
use_core_columns=False,
output_leaf_index=False,
output_leaf_index_modes=None,
num_quantiles=100):
"""Construct a new GradientBoostedDecisionTreeModel function.
Args:
is_chief: Whether to build the chief graph.
num_ps_replicas: Number of parameter server replicas, can be 0.
ensemble_handle: A handle to the ensemble variable.
center_bias: Whether to center the bias before growing trees.
examples_per_layer: Number of examples to accumulate before growing a tree
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
learner_config: A learner config.
features: `dict` of `Tensor` objects.
logits_dimension: An int, the dimension of logits.
loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
feature_columns: A list of feature columns.
use_core_columns: A boolean specifying whether core feature columns are
used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
dictates when leaf indices will be outputted. By default, leaf indices
are only outputted in INFER mode.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: if inputs are not valid.
"""
if ensemble_handle is None:
raise ValueError("ensemble_handle must be specified.")
if learner_config is None:
raise ValueError("learner_config must be specified.")
if learner_config.num_classes < 2:
raise ValueError("Number of classes must be >=2")
self._logits_dimension = logits_dimension
self._is_chief = is_chief
self._num_ps_replicas = num_ps_replicas
self._ensemble_handle = ensemble_handle
self._center_bias = center_bias
self._examples_per_layer = examples_per_layer
# Check loss reduction value.
if (loss_reduction != losses.Reduction.SUM and
loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
raise ValueError(
"Invalid loss reduction is provided: %s." % loss_reduction)
self._loss_reduction = loss_reduction
# Fill in the defaults.
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
if logits_dimension == 1:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
else:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
if logits_dimension == 1 or learner_config.multi_class_strategy == (
learner_pb2.LearnerConfig.TREE_PER_CLASS):
self._gradient_shape = tensor_shape.scalar()
self._hessian_shape = tensor_shape.scalar()
else:
if center_bias:
raise ValueError("Center bias should be False for multiclass.")
self._gradient_shape = tensor_shape.TensorShape([logits_dimension])
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.FULL_HESSIAN):
self._hessian_shape = tensor_shape.TensorShape(
([logits_dimension, logits_dimension]))
else:
# Diagonal hessian strategy.
self._hessian_shape = tensor_shape.TensorShape(([logits_dimension]))
if (learner_config.growing_mode ==
learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
if (learner_config.pruning_mode ==
learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE
if learner_config.constraints.max_tree_depth == 0:
# Use 6 as the default maximum depth.
learner_config.constraints.max_tree_depth = 6
tuner = learner_config.learning_rate_tuner.WhichOneof("tuner")
if not tuner:
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
self._learner_config = learner_config
self._feature_columns = feature_columns
self._learner_config_serialized = learner_config.SerializeToString()
self._num_quantiles = num_quantiles
self._max_tree_depth = variables.VariableV1(
initial_value=self._learner_config.constraints.max_tree_depth)
self._attempted_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="attempted_trees")
self._finalized_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="finalized_trees")
if not features:
raise ValueError("Features dictionary must be specified.")
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices,
sparse_int_values, sparse_int_shapes) = extract_features(
features, self._feature_columns, use_core_columns)
logging.info("Active Feature Columns: " + str(fc_names))
logging.info("Learner config: " + str(learner_config))
self._fc_names = fc_names
self._dense_floats = dense_floats
self._sparse_float_indices = sparse_float_indices
self._sparse_float_values = sparse_float_values
self._sparse_float_shapes = sparse_float_shapes
self._sparse_int_indices = sparse_int_indices
self._sparse_int_values = sparse_int_values
self._sparse_int_shapes = sparse_int_shapes
self._reduce_dim = (
self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
learner_config.num_classes == 2)
if output_leaf_index_modes is None:
output_leaf_index_modes = [learn.ModeKeys.INFER]
elif not all(
mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,
learn.ModeKeys.INFER) for mode in output_leaf_index_modes):
raise ValueError("output_leaf_index_modes should only contain ModeKeys.")
self._output_leaf_index = output_leaf_index
self._output_leaf_index_modes = output_leaf_index_modes
def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
"""Runs prediction and returns a dictionary of the prediction results.
Args:
ensemble_handle: ensemble resource handle.
ensemble_stamp: stamp of ensemble resource.
mode: learn.ModeKeys.TRAIN or EVAL or INFER.
Returns:
a dictionary of prediction results -
ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,
NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.
"""
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
ensemble_stamp)
num_handlers = (
len(self._dense_floats) + len(self._sparse_float_shapes) + len(
self._sparse_int_shapes))
# Used during feature selection.
used_handlers = model_ops.tree_ensemble_used_handlers(
ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)
# We don't need dropout info - we can always restore it based on the
# seed.
apply_dropout, seed = _dropout_params(mode, ensemble_stats)
# Make sure ensemble stats run. This will check that the ensemble has
# the right stamp.
with ops.control_dependencies(ensemble_stats):
leaf_index = None
if self._output_leaf_index and mode in self._output_leaf_index_modes:
predictions, _, leaf_index = (
prediction_ops).gradient_trees_prediction_verbose(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
else:
leaf_index = None
predictions, _ = prediction_ops.gradient_trees_prediction(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
partition_ids = prediction_ops.gradient_trees_partition_examples(
ensemble_handle,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
use_locking=True)
return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,
ensemble_stats, used_handlers, leaf_index)
def predict(self, mode):
"""Returns predictions given the features and mode.
Args:
mode: Mode the graph is running in (train|predict|eval).
Returns:
A dict of predictions tensors.
Raises:
ValueError: if features is not valid.
"""
# Use the current ensemble to predict on the current batch of input.
# For faster prediction we check if the inputs are on the same device
# as the model. If not, we create a copy of the model on the worker.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
if not input_deps:
raise ValueError("No input tensors for prediction.")
# Get most current model stamp.
ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)
# Determine if ensemble is colocated with the inputs.
if self._ensemble_handle.device != input_deps[0].device:
# Create a local ensemble and get its local stamp.
with ops.name_scope("local_ensemble", "TreeEnsembleVariable") as name:
local_ensemble_handle = (
gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name))
create_op = gen_model_ops.create_tree_ensemble_variable(
local_ensemble_handle, stamp_token=-1, tree_ensemble_config="")
with ops.control_dependencies([create_op]):
local_stamp = model_ops.tree_ensemble_stamp_token(
local_ensemble_handle)
# Determine whether the local ensemble is stale and update it if needed.
def _refresh_local_ensemble_fn():
# Serialize the model from parameter server after reading the inputs.
with ops.control_dependencies([input_deps[0]]):
(ensemble_stamp, serialized_model) = (
model_ops.tree_ensemble_serialize(self._ensemble_handle))
# Update local ensemble with the serialized model from parameter server.
with ops.control_dependencies([create_op]):
return model_ops.tree_ensemble_deserialize(
local_ensemble_handle,
stamp_token=ensemble_stamp,
tree_ensemble_config=serialized_model), ensemble_stamp
refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(
math_ops.not_equal(ensemble_stamp,
local_stamp), _refresh_local_ensemble_fn,
lambda: (control_flow_ops.no_op(), ensemble_stamp))
# Once updated, use the local model for prediction.
with ops.control_dependencies([refresh_local_ensemble]):
return self._predict_and_return_dict(local_ensemble_handle,
ensemble_stamp, mode)
else:
# Use ensemble_handle directly, if colocated.
with ops.device(self._ensemble_handle.device):
return self._predict_and_return_dict(self._ensemble_handle,
ensemble_stamp, mode)
def _get_class_id(self, predictions_dict):
# Handle different multiclass strategies.
if (self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
self._logits_dimension != 1):
# Choose the class for which the tree is built (one vs rest).
return math_ops.to_int32(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension)
return constant_op.constant(-1, dtype=dtypes.int32)
def update_stats(self, loss, predictions_dict):
"""Update the accumulators with stats from this batch.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
Returns:
Three values:
- An op that adds a new tree to the ensemble, and
- An op that increments the stamp but removes all the trees and resets
the handlers. This can be used to reset the state of the ensemble.
      - A `GBDTTrainingState` namedtuple containing the training state.
Raises:
ValueError: if inputs are not valid.
"""
# Get the worker device from input dependencies.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
worker_device = input_deps[0].device
# Get tensors relevant for training and form the loss.
predictions = predictions_dict[PREDICTIONS]
partition_ids = predictions_dict[PARTITION_IDS]
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
gradients = gradients_impl.gradients(
loss,
predictions,
name="Gradients",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
strategy = self._learner_config.multi_class_strategy
class_id = self._get_class_id(predictions_dict)
# Handle different multiclass strategies.
if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:
# We build one vs rest trees.
if self._logits_dimension == 1:
# We have only 1 score, gradients is of shape [batch, 1].
hessians = gradients_impl.gradients(
gradients,
predictions,
name="Hessian",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
else:
hessian_list = self._diagonal_hessian(gradients, predictions)
# Assemble hessian list into a tensor.
hessians = array_ops.stack(hessian_list, axis=1)
# Use class id tensor to get the column with that index from gradients
# and hessians.
squeezed_gradients = array_ops.squeeze(
_get_column_by_index(gradients, class_id))
squeezed_hessians = array_ops.squeeze(
_get_column_by_index(hessians, class_id))
else:
# Other multiclass strategies.
if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:
hessian_list = self._full_hessian(gradients, predictions)
else:
# Diagonal hessian strategy.
hessian_list = self._diagonal_hessian(gradients, predictions)
squeezed_gradients = gradients
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_hessians = hessians
# Get the weights for each example for quantiles calculation,
weights = self._get_weights(self._hessian_shape, squeezed_hessians)
# Create all handlers ensuring resources are evenly allocated across PS.
fc_name_idx = 0
handlers = []
init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
l1_regularization = constant_op.constant(
self._learner_config.regularization.l1, dtypes.float32)
l2_regularization = constant_op.constant(
self._learner_config.regularization.l2, dtypes.float32)
tree_complexity_regularization = constant_op.constant(
self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(
self._learner_config.constraints.min_node_weight, dtypes.float32)
loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM
loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
weak_learner_type = constant_op.constant(
self._learner_config.weak_learner_type)
num_quantiles = self._num_quantiles
epsilon = 1.0 / num_quantiles
strategy_tensor = constant_op.constant(strategy)
with ops.device(self._get_replica_device_setter(worker_device)):
# Create handlers for dense float columns
for dense_float_column_idx in range(len(self._dense_floats)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.DenseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
dense_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
dense_float_column=self._dense_floats[dense_float_column_idx],
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type,
))
fc_name_idx += 1
# Create handlers for sparse float columns.
for sparse_float_column_idx in range(len(self._sparse_float_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.SparseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
sparse_float_column=sparse_tensor.SparseTensor(
self._sparse_float_indices[sparse_float_column_idx],
self._sparse_float_values[sparse_float_column_idx],
self._sparse_float_shapes[sparse_float_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create handlers for sparse int columns.
for sparse_int_column_idx in range(len(self._sparse_int_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
categorical_split_handler.EqualitySplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_int_column_idx),
sparse_int_column=sparse_tensor.SparseTensor(
self._sparse_int_indices[sparse_int_column_idx],
self._sparse_int_values[sparse_int_column_idx],
self._sparse_int_shapes[sparse_int_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type))
fc_name_idx += 1
# Create ensemble stats variables.
num_layer_examples = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_examples",
trainable=False)
num_layer_steps = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_steps",
trainable=False)
num_layers = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layers",
trainable=False)
active_tree = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_tree",
trainable=False)
active_layer = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_layer",
trainable=False)
# Variable that becomes false once bias centering is done.
continue_centering = variables.VariableV1(
initial_value=self._center_bias,
name="continue_centering",
trainable=False)
# Create bias stats accumulator.
bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
name="BiasAccumulator")
# Create steps accumulator.
steps_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar(),
name="StepsAccumulator")
# Create ensemble stats summaries.
summary.scalar("layer_stats/num_examples", num_layer_examples)
summary.scalar("layer_stats/num_steps", num_layer_steps)
summary.scalar("ensemble_stats/active_tree", active_tree)
summary.scalar("ensemble_stats/active_layer", active_layer)
# Update bias stats.
stats_update_ops = []
stats_update_ops.append(
control_flow_ops.cond(
continue_centering,
self._make_update_bias_stats_fn(
ensemble_stamp, predictions, gradients,
bias_stats_accumulator), control_flow_ops.no_op))
# Update handler stats.
handler_reads = collections.OrderedDict()
for handler in handlers:
handler_reads[handler] = handler.scheduled_reads()
handler_results = batch_ops_utils.run_handler_scheduled_ops(
handler_reads, ensemble_stamp, worker_device)
per_handler_updates = collections.OrderedDict()
# Two values per handler. First one is if the handler is active for the
# current layer. The second one is if the handler is going to be active
# for the next layer.
subsampling_type = self._learner_config.WhichOneof("feature_fraction")
if subsampling_type == "feature_fraction_per_level":
seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 1])
active_handlers_next_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed + 1, 1])
active_handlers = array_ops.stack(
[active_handlers_current_layer, active_handlers_next_layer], axis=1)
active_handlers = (
active_handlers < self._learner_config.feature_fraction_per_level)
elif subsampling_type == "feature_fraction_per_tree":
seed = predictions_dict[NUM_TREES_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 2])
active_handlers_current_layer = (
active_handlers_current_layer <
self._learner_config.feature_fraction_per_tree)
active_handlers = array_ops.stack(
[
active_handlers_current_layer,
array_ops.ones([len(handlers)], dtype=dtypes.bool)
],
axis=1)
else:
active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)
if self._learner_config.constraints.max_number_of_unique_feature_columns:
target = (
self._learner_config.constraints.max_number_of_unique_feature_columns)
def _feature_selection_active_handlers():
# The active list for current and the next iteration.
used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK],
[-1, 1])
used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)
return math_ops.logical_and(used_handlers, active_handlers)
active_handlers = (
control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target,
_feature_selection_active_handlers,
lambda: active_handlers))
# Prepare empty gradients and hessians when handlers are not ready.
empty_hess_shape = [1] + self._hessian_shape.as_list()
empty_grad_shape = [1] + self._gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
active_handlers = array_ops.unstack(active_handlers, axis=0)
for handler_idx in range(len(handlers)):
handler = handlers[handler_idx]
is_active = active_handlers[handler_idx]
updates, scheduled_updates = handler.update_stats(
ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,
empty_gradients, empty_hessians, weights, is_active,
handler_results[handler])
stats_update_ops.append(updates)
per_handler_updates[handler] = scheduled_updates
update_results = batch_ops_utils.run_handler_scheduled_ops(
per_handler_updates, ensemble_stamp, worker_device)
for update in update_results.values():
stats_update_ops += update
training_state = GBDTTrainingState(
num_layer_examples=num_layer_examples,
num_layer_steps=num_layer_steps,
num_layers=num_layers,
active_tree=active_tree,
active_layer=active_layer,
continue_centering=continue_centering,
bias_stats_accumulator=bias_stats_accumulator,
steps_accumulator=steps_accumulator,
handlers=handlers)
reset_op = control_flow_ops.no_op()
if self._is_chief:
# Advance the ensemble stamp to throw away staggered workers.
stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
next_stamp_token = stamp_token + 1
reset_ops = []
for handler in handlers:
reset_ops.append(handler.reset(stamp_token, next_stamp_token))
if self._center_bias:
reset_ops.append(
bias_stats_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(self._finalized_trees.assign(0).op)
reset_ops.append(self._attempted_trees.assign(0).op)
reset_ops.append(
model_ops.tree_ensemble_deserialize(
self._ensemble_handle,
stamp_token=next_stamp_token,
tree_ensemble_config="",
name="reset_gbdt"))
reset_op = control_flow_ops.group([reset_ops])
return stats_update_ops, reset_op, training_state
def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
training_state):
"""Increments number of visited examples and grows the ensemble.
If the number of visited examples reaches the target examples_per_layer,
ensemble is updated.
Args:
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
      training_state: A `GBDTTrainingState` namedtuple returned by update_stats.
Returns:
      An op that updates the counters and potentially grows the ensemble.
"""
batch_size = math_ops.cast(
array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
# Accumulate a step after updating stats.
steps_accumulator = training_state.steps_accumulator
num_layer_examples = training_state.num_layer_examples
num_layer_steps = training_state.num_layer_steps
active_layer = training_state.active_layer
add_step_op = steps_accumulator.add(
ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
# After adding the step, decide if further processing is needed.
ensemble_update_ops = [add_step_op]
class_id = self._get_class_id(predictions_dict)
with ops.control_dependencies([add_step_op]):
if self._is_chief:
dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]
# Get accumulated steps and examples for the current layer.
_, _, _, _, acc_examples, acc_steps = (
steps_accumulator.serialize())
acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
ensemble_update_ops.append(
num_layer_examples.assign(acc_examples))
ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
# Determine whether we need to update tree ensemble.
examples_per_layer = self._examples_per_layer
if callable(examples_per_layer):
examples_per_layer = examples_per_layer(active_layer)
ensemble_update_ops.append(
control_flow_ops.cond(
acc_examples >= examples_per_layer,
self.make_update_ensemble_fn(ensemble_stamp, training_state,
dropout_seed, class_id),
control_flow_ops.no_op))
    # Note: the loss is calculated from predictions that include dropout, so
    # its value can look noisy across steps when the dropout ratio is high;
    # eval_loss is a better indicator of convergence.
return control_flow_ops.group(*ensemble_update_ops)
def make_update_ensemble_fn(self, ensemble_stamp, training_state,
dropout_seed, class_id):
"""A method to create the function which updates the tree ensemble."""
# Determine learning rate.
learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
"tuner")
if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
tuner = getattr(self._learner_config.learning_rate_tuner,
learning_rate_tuner)
learning_rate = tuner.learning_rate
else:
# TODO(nponomareva, soroush) do the line search.
raise ValueError("Line search learning rate is not yet supported.")
def _update_ensemble():
"""A method to update the tree ensemble."""
# Get next stamp token.
next_ensemble_stamp = ensemble_stamp + 1
# Finalize bias stats.
_, _, _, bias_grads, bias_hess = (
training_state.bias_stats_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
# Finalize handler splits.
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready,
partition_ids, gains, split_info) = handler.make_splits(
ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
# Stack all the inputs to one tensor per type.
# This is a workaround for the slowness of graph building in tf.cond.
# See (b/36554864).
split_sizes = array_ops.reshape(
array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
# Determine if all splits are ready.
are_all_splits_ready = math_ops.reduce_all(
array_ops.stack(
are_splits_ready_list, axis=0, name="stack_handler_readiness"))
# Define bias centering update operation.
def _center_bias_fn():
# Center tree ensemble bias.
delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,
array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
delta_updates=delta_updates,
learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
# Define ensemble growing operations.
def _grow_ensemble_ready_fn():
# Grow the ensemble given the current candidates.
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
      # When using the oblivious decision tree as the weak learner, it produces
      # one gain and one split per handler rather than one per partition.
if self._learner_config.weak_learner_type == (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=learning_rate,
partition_ids=partition_ids_list,
gains=gains_list,
splits=split_info_list,
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
# Don't grow the ensemble, just update the stamp.
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=0,
partition_ids=[],
gains=[],
splits=[],
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
# Conditionally grow an ensemble depending on whether the splits
# from all the handlers are ready.
return control_flow_ops.cond(are_all_splits_ready,
_grow_ensemble_ready_fn,
_grow_ensemble_not_ready_fn)
# Update ensemble.
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering,
_center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
# Update ensemble stats.
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(
self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(
training_state.active_layer.assign(stats.active_layer))
# Flush step stats.
update_ops.extend(
training_state.steps_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name="update_ensemble")
return _update_ensemble
def get_number_of_trees_tensor(self):
return self._finalized_trees, self._attempted_trees
def get_max_tree_depth(self):
return self._max_tree_depth
def train(self, loss, predictions_dict, labels):
"""Updates the accumalator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
Returns:
An op that adds a new tree to the ensemble.
Raises:
ValueError: if inputs are not valid.
"""
del labels # unused; kept for backward compatibility.
update_op, _, training_state = self.update_stats(loss, predictions_dict)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
def _get_weights(self, hessian_shape, hessians):
"""Derives weights to be used based on hessians and multiclass strategy."""
if hessian_shape == tensor_shape.scalar():
# This is tree per class.
weights = hessians
elif len(hessian_shape.dims) == 1:
# This is diagonal hessian.
weights = math_ops.reduce_sum(hessians, axis=1)
else:
# This is full hessian.
weights = math_ops.trace(hessians)
return weights
def _full_hessian(self, grads, predictions):
"""Prepares hessians for full-hessian multiclass strategy."""
# Because of
# https://github.com/tensorflow/tensorflow/issues/675, we can't just
# compute the full hessian with a single call to gradients, but instead
# must compute it row-by-row.
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
      # If the current row is i and K is the number of classes, each row
      # returns a tensor of size batch_size x K representing, for each
      # example, dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
gradients_list[row],
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
def _diagonal_hessian(self, grads, predictions):
"""Prepares hessians for diagonal-hessian multiclass mode."""
diag_hessian_list = []
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
for row, row_grads in enumerate(gradients_list):
      # If the current row is i and K is the number of classes, each row
      # returns a tensor of size batch_size x K representing, for each
      # example, dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
row_grads,
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
# Get dx_i^2 for the whole batch.
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
def _get_replica_device_setter(self, worker_device):
"""Creates a replica device setter."""
ps_tasks = self._num_ps_replicas
ps_ops = [
"Variable",
"VariableV2",
"DecisionTreeEnsembleResourceHandleOp",
"StatsAccumulatorScalarResourceHandleOp",
"StatsAccumulatorTensorResourceHandleOp",
]
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_tasks,
merge_devices=True,
ps_ops=ps_ops,
ps_strategy=ps_strategy)
def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients,
bias_stats_accumulator):
"""A method to create the function which updates the bias stats."""
def _update_bias_stats():
"""A method to update the bias stats."""
# Get reduced gradients and hessians.
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(
grads_sum,
predictions,
name="Hessians",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
# Accumulate gradients and hessians.
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros(
[self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(
ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name="update_bias_stats")
return _update_bias_stats
| 43.220062 | 89 | 0.676112 | ["MIT"] | JustinACoder/H22-GR3-UnrealAI | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py | 55,581 | Python |
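# A minimal NumPy sketch of the weight derivation described in _get_weights
# above: a scalar hessian (tree-per-class) is used directly, a diagonal
# hessian is reduced by summing over classes, and a full hessian is reduced
# via its trace. This is an illustrative stand-in, not the TensorFlow graph
# code from the file above.
import numpy as np

def derive_weights(hessians):
    if hessians.ndim == 1:    # scalar hessian per example
        return hessians
    if hessians.ndim == 2:    # diagonal hessian: (batch, num_classes)
        return hessians.sum(axis=1)
    # full hessian: (batch, num_classes, num_classes)
    return np.trace(hessians, axis1=1, axis2=2)

# Two examples, three classes: the diagonal and full forms agree.
diag = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
full = np.stack([np.diag(row) for row in diag])
assert np.allclose(derive_weights(diag), derive_weights(full))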
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateIndirectPartnerAccountResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'transfer_id': 'str'
}
attribute_map = {
'transfer_id': 'transfer_id'
}
def __init__(self, transfer_id=None):
"""UpdateIndirectPartnerAccountResponse - a model defined in huaweicloud sdk"""
        super(UpdateIndirectPartnerAccountResponse, self).__init__()
self._transfer_id = None
self.discriminator = None
if transfer_id is not None:
self.transfer_id = transfer_id
@property
def transfer_id(self):
"""Gets the transfer_id of this UpdateIndirectPartnerAccountResponse.
        Transaction ID; it is returned only when the response is successful.
:return: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:rtype: str
"""
return self._transfer_id
@transfer_id.setter
def transfer_id(self, transfer_id):
"""Sets the transfer_id of this UpdateIndirectPartnerAccountResponse.
        Transaction ID; it is returned only when the response is successful.
:param transfer_id: The transfer_id of this UpdateIndirectPartnerAccountResponse.
:type: str
"""
self._transfer_id = transfer_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateIndirectPartnerAccountResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.432432 | 89 | 0.572742 | ["Apache-2.0"] | Lencof/huaweicloud-sdk-python-v3 | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/update_indirect_partner_account_response.py | 3,109 | Python |
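# A small usage sketch for the generated response model above. The module
# path is inferred from the file path in the record and may vary between SDK
# releases; the transfer_id value is a placeholder.
from huaweicloudsdkbss.v2.model.update_indirect_partner_account_response import \
    UpdateIndirectPartnerAccountResponse

resp = UpdateIndirectPartnerAccountResponse(transfer_id="TR0000000000")
print(resp.to_dict())   # {'transfer_id': 'TR0000000000'}
print(resp)             # pprint-formatted output via to_str()/__repr__()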
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# invenio-subjects-mesh is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Version information for invenio-subjects-mesh.
This file is imported by ``invenio_subjects_mesh.__init__``,
and parsed by ``setup.py``.
"""
__version__ = '2021.7.13'
| 25.25 | 73 | 0.725248 | ["MIT"] | fenekku/invenio-subjects-mesh | invenio_subjects_mesh/version.py | 404 | Python |
# ---------------------------------------------------------------------
# Segment handlers
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import logging
# NOC modules
from noc.sa.models.managedobject import ManagedObject
from noc.fm.models.activealarm import ActiveAlarm
logger = logging.getLogger(__name__)
def set_segment_redundancy(alarm):
"""
    Set lost_redundancy on the segment when a redundant object goes down
:param alarm:
:return:
"""
if alarm.root:
return # Already changed by root cause
mo = alarm.managed_object
seg = mo.segment
if seg.is_redundant and not seg.lost_redundancy:
u = mo.data.uplinks
if len(u) > 1:
logger.info("[%s] Redundancy lost for %s", alarm.id, seg.name)
seg.set_lost_redundancy(True)
def check_segment_redundancy(alarm):
"""
    Clear lost_redundancy on the segment when all redundant objects are up
:param alarm:
:return:
"""
mo = alarm.managed_object
seg = mo.segment
if not seg.is_redundant or not seg.lost_redundancy:
return
u = mo.data.uplinks
if len(u) < 2:
return
seg_objects = list(seg.managed_objects.values_list("id", flat=True))
alarms = [
d["managed_object"]
for d in ActiveAlarm._get_collection().find(
{"managed_object": {"$in": seg_objects}}, {"_id": 0, "managed_object": 1}
)
if d["managed_object"] != mo.id
]
uplinks = ManagedObject.uplinks_for_objects(alarms)
if not any(x for x in uplinks.values() if len(x) > 1):
logger.info("[%s] Redundancy recovered for %s", alarm.id, seg.name)
seg.set_lost_redundancy(False)
| 30.57377 | 85 | 0.576408 | ["BSD-3-Clause"] | sbworth/getnoc | fm/handlers/alarm/segment.py | 1,865 | Python |
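# A dependency-free sketch of the redundancy decision implemented by the two
# handlers above: redundancy is "lost" when an alarmed object in a redundant
# segment has more than one uplink, and "recovered" once no other alarmed
# object in the segment still has more than one uplink. Plain dicts stand in
# for the ManagedObject/segment models, so this is illustrative only.

def redundancy_lost(segment, alarmed_object_uplinks):
    return segment["is_redundant"] and len(alarmed_object_uplinks) > 1

def redundancy_recovered(segment, other_alarmed_uplinks):
    if not (segment["is_redundant"] and segment["lost_redundancy"]):
        return False
    return not any(len(u) > 1 for u in other_alarmed_uplinks.values())

seg = {"is_redundant": True, "lost_redundancy": True}
print(redundancy_lost(seg, ["up1", "up2"]))                     # True
print(redundancy_recovered(seg, {"mo2": ["up1"], "mo3": []}))   # True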
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spectrogram decomposition
=========================
.. autosummary::
:toctree: generated/
decompose
hpss
nn_filter
"""
import numpy as np
import scipy.sparse
from scipy.ndimage import median_filter
import sklearn.decomposition
from . import core
from . import cache
from . import segment
from . import util
from .util.exceptions import ParameterError
__all__ = ['decompose', 'hpss', 'nn_filter']
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"""Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
    By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 32 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
"""
if transformer is None:
if fit is False:
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components,
**kwargs)
if n_components is None:
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
components, idx = util.axis_sort(components, index=True)
activations = activations[idx]
return components, activations
@cache(level=30)
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
"""Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(H,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(P,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
"""
if np.iscomplexobj(S):
S, phase = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
# margin minimum is 1.0
if margin_harm < 1 or margin_perc < 1:
raise ParameterError("Margins must be >= 1.0. "
"A typical range is between 1 and 10.")
# Compute median filters. Pre-allocation here preserves memory layout.
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = (margin_harm == 1 and margin_perc == 1)
mask_harm = util.softmask(harm, perc * margin_harm,
power=power,
split_zeros=split_zeros)
mask_perc = util.softmask(perc, harm * margin_perc,
power=power,
split_zeros=split_zeros)
if mask:
return mask_harm, mask_perc
return ((S * mask_harm) * phase, (S * mask_perc) * phase)
@cache(level=30)
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
'''Filtering by nearest-neighbors.
Each data point (e.g, spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default this would use euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
'''
if aggregate is None:
aggregate = np.mean
if rec is None:
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif not scipy.sparse.issparse(rec):
rec = scipy.sparse.csr_matrix(rec)
if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
raise ParameterError('Invalid self-similarity matrix shape '
'rec.shape={} for S.shape={}'.format(rec.shape,
S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr,
S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'''Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
'''
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
# Get the non-zeros out of the recurrence matrix
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
| 32.827094 | 91 | 0.587641 | ["ISC"] | ElisaIzrailova/librosa | librosa/decompose.py | 18,417 | Python |
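# A standalone sketch of the median-filtering idea behind hpss() above:
# a horizontal (time-direction) median filter emphasises harmonic energy,
# a vertical (frequency-direction) one emphasises percussive energy, and
# Wiener-style soft masks split the input. Toy sizes only; the librosa
# implementation above additionally handles margins, complex input, and
# zero-splitting in its softmask.
import numpy as np
from scipy.ndimage import median_filter

rng = np.random.default_rng(0)
S = rng.random((64, 128))                      # toy magnitude spectrogram
harm = median_filter(S, size=(1, 17), mode='reflect')
perc = median_filter(S, size=(17, 1), mode='reflect')

eps = np.finfo(float).tiny
mask_h = harm**2 / (harm**2 + perc**2 + eps)   # soft masks, power=2
mask_p = perc**2 / (harm**2 + perc**2 + eps)

H, P = S * mask_h, S * mask_p
print(np.allclose(H + P, S, atol=1e-6))        # masks sum to ~1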
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/SetJointTrajectoryRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import trajectory_msgs.msg
import geometry_msgs.msg
import genpy
import std_msgs.msg
class SetJointTrajectoryRequest(genpy.Message):
_md5sum = "649dd2eba5ffd358069238825f9f85ab"
_type = "gazebo_msgs/SetJointTrajectoryRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string model_name
trajectory_msgs/JointTrajectory joint_trajectory
geometry_msgs/Pose model_pose
bool set_model_pose
bool disable_physics_updates
================================================================================
MSG: trajectory_msgs/JointTrajectory
Header header
string[] joint_names
JointTrajectoryPoint[] points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: trajectory_msgs/JointTrajectoryPoint
# Each trajectory point specifies either positions[, velocities[, accelerations]]
# or positions[, effort] for the trajectory to be executed.
# All specified values are in the same order as the joint names in JointTrajectory.msg
float64[] positions
float64[] velocities
float64[] accelerations
float64[] effort
duration time_from_start
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['model_name','joint_trajectory','model_pose','set_model_pose','disable_physics_updates']
_slot_types = ['string','trajectory_msgs/JointTrajectory','geometry_msgs/Pose','bool','bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
model_name,joint_trajectory,model_pose,set_model_pose,disable_physics_updates
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetJointTrajectoryRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.model_name is None:
self.model_name = ''
if self.joint_trajectory is None:
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if self.model_pose is None:
self.model_pose = geometry_msgs.msg.Pose()
if self.set_model_pose is None:
self.set_model_pose = False
if self.disable_physics_updates is None:
self.disable_physics_updates = False
else:
self.model_name = ''
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
self.model_pose = geometry_msgs.msg.Pose()
self.set_model_pose = False
self.disable_physics_updates = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.model_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
_x = self.joint_trajectory.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.joint_trajectory.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.joint_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.joint_trajectory.points)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.positions))
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.velocities))
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.accelerations))
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *val1.effort))
_v1 = val1.time_from_start
_x = _v1
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_x = self
buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.joint_trajectory is None:
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if self.model_pose is None:
self.model_pose = geometry_msgs.msg.Pose()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8')
else:
self.model_name = str[start:end]
_x = self
start = end
end += 12
(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
else:
self.joint_trajectory.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.joint_trajectory.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.positions = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.velocities = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.accelerations = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.effort = struct.unpack(pattern, str[start:end])
_v2 = val1.time_from_start
_x = _v2
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.joint_trajectory.points.append(val1)
_x = self
start = end
end += 58
(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates,) = _get_struct_7d2B().unpack(str[start:end])
self.set_model_pose = bool(self.set_model_pose)
self.disable_physics_updates = bool(self.disable_physics_updates)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.model_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_3I().pack(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs))
_x = self.joint_trajectory.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.joint_trajectory.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.joint_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
length = len(self.joint_trajectory.points)
buff.write(_struct_I.pack(length))
for val1 in self.joint_trajectory.points:
length = len(val1.positions)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.positions.tostring())
length = len(val1.velocities)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.velocities.tostring())
length = len(val1.accelerations)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.accelerations.tostring())
length = len(val1.effort)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(val1.effort.tostring())
_v3 = val1.time_from_start
_x = _v3
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_x = self
buff.write(_get_struct_7d2B().pack(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.joint_trajectory is None:
self.joint_trajectory = trajectory_msgs.msg.JointTrajectory()
if self.model_pose is None:
self.model_pose = geometry_msgs.msg.Pose()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8')
else:
self.model_name = str[start:end]
_x = self
start = end
end += 12
(_x.joint_trajectory.header.seq, _x.joint_trajectory.header.stamp.secs, _x.joint_trajectory.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_trajectory.header.frame_id = str[start:end].decode('utf-8')
else:
self.joint_trajectory.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.joint_trajectory.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_trajectory.points = []
for i in range(0, length):
val1 = trajectory_msgs.msg.JointTrajectoryPoint()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.positions = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.velocities = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.accelerations = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
val1.effort = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
_v4 = val1.time_from_start
_x = _v4
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.joint_trajectory.points.append(val1)
_x = self
start = end
end += 58
(_x.model_pose.position.x, _x.model_pose.position.y, _x.model_pose.position.z, _x.model_pose.orientation.x, _x.model_pose.orientation.y, _x.model_pose.orientation.z, _x.model_pose.orientation.w, _x.set_model_pose, _x.disable_physics_updates,) = _get_struct_7d2B().unpack(str[start:end])
self.set_model_pose = bool(self.set_model_pose)
self.disable_physics_updates = bool(self.disable_physics_updates)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
_struct_7d2B = None
def _get_struct_7d2B():
global _struct_7d2B
if _struct_7d2B is None:
_struct_7d2B = struct.Struct("<7d2B")
return _struct_7d2B
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/SetJointTrajectoryResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetJointTrajectoryResponse(genpy.Message):
_md5sum = "2ec6f3eff0161f4257b808b12bc830c2"
_type = "gazebo_msgs/SetJointTrajectoryResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """bool success
string status_message
"""
__slots__ = ['success','status_message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetJointTrajectoryResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.status_message is None:
self.status_message = ''
else:
self.success = False
self.status_message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class SetJointTrajectory(object):
_type = 'gazebo_msgs/SetJointTrajectory'
_md5sum = '88f5c10979e3f9649d5ae87a3b12aa65'
_request_class = SetJointTrajectoryRequest
_response_class = SetJointTrajectoryResponse
| 37.508306 | 292 | 0.642781 | ["MIT"] | Filipe-Douglas-Slam/slam_lidar_kinect | files/catkin_ws/devel/lib/python2.7/dist-packages/gazebo_msgs/srv/_SetJointTrajectory.py | 22,580 | Python |
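# A serialize/deserialize round-trip sketch for the generated service request
# above. It assumes a ROS environment where the gazebo_msgs and
# trajectory_msgs Python packages are importable; the model/joint names are
# placeholders. On Python 2 the buffer would be a StringIO, as the docstrings
# above note; BytesIO is used here for Python 3.
from io import BytesIO
import trajectory_msgs.msg
from gazebo_msgs.srv import SetJointTrajectoryRequest

req = SetJointTrajectoryRequest()
req.model_name = 'example_robot'
req.joint_trajectory.joint_names = ['joint1', 'joint2']
req.joint_trajectory.points.append(
    trajectory_msgs.msg.JointTrajectoryPoint(positions=[0.0, 1.57]))

buf = BytesIO()
req.serialize(buf)                        # pack into the wire format
copy = SetJointTrajectoryRequest()
copy.deserialize(buf.getvalue())          # unpack into a fresh message
assert copy.model_name == req.model_name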
from __future__ import unicode_literals
import netaddr
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from ipam.models import IPAddress, Prefix, VRF
class TestPrefix(TestCase):
@override_settings(ENFORCE_GLOBAL_UNIQUE=False)
def test_duplicate_global(self):
Prefix.objects.create(prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertIsNone(duplicate_prefix.clean())
@override_settings(ENFORCE_GLOBAL_UNIQUE=True)
def test_duplicate_global_unique(self):
Prefix.objects.create(prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertRaises(ValidationError, duplicate_prefix.clean)
def test_duplicate_vrf(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=False)
Prefix.objects.create(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertIsNone(duplicate_prefix.clean())
def test_duplicate_vrf_unique(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=True)
Prefix.objects.create(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
duplicate_prefix = Prefix(vrf=vrf, prefix=netaddr.IPNetwork('192.0.2.0/24'))
self.assertRaises(ValidationError, duplicate_prefix.clean)
class TestIPAddress(TestCase):
@override_settings(ENFORCE_GLOBAL_UNIQUE=False)
def test_duplicate_global(self):
IPAddress.objects.create(address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertIsNone(duplicate_ip.clean())
@override_settings(ENFORCE_GLOBAL_UNIQUE=True)
def test_duplicate_global_unique(self):
IPAddress.objects.create(address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertRaises(ValidationError, duplicate_ip.clean)
def test_duplicate_vrf(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=False)
IPAddress.objects.create(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertIsNone(duplicate_ip.clean())
def test_duplicate_vrf_unique(self):
vrf = VRF.objects.create(name='Test', rd='1:1', enforce_unique=True)
IPAddress.objects.create(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
duplicate_ip = IPAddress(vrf=vrf, address=netaddr.IPNetwork('192.0.2.1/24'))
self.assertRaises(ValidationError, duplicate_ip.clean)
| 45.774194 | 84 | 0.72093 | ["Apache-2.0"] | 0xAalaoui/netbox | netbox/ipam/tests/test_models.py | 2,838 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
__author__ = 'Tim Schneider <[email protected]>'
__copyright__ = "Copyright 2015, Northbridge Development Konrad & Schneider GbR"
__credits__ = ["Tim Schneider", ]
__maintainer__ = "Tim Schneider"
__email__ = "[email protected]"
__status__ = "Development"
logger = logging.getLogger(__name__)
import glob
import os
import sys
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
print(BASE_DIR)
sys.path.insert(0, os.path.abspath(BASE_DIR))
try:
import coverage # Import coverage if available
cov = coverage.coverage(
cover_pylib=False,
config_file=os.path.join(os.path.dirname(__file__), 'coverage.conf'),
include='%s/*' % BASE_DIR,
)
cov.start()
sys.stdout.write('Using coverage\n')
except ImportError:
cov = None
sys.stdout.write('Coverage not available. To evaluate the coverage, please install coverage.\n')
import django
from django.conf import settings
from django.core.management import execute_from_command_line
# Unfortunately, apps can not be installed via ``modify_settings``
# decorator, because it would miss the database setup.
INSTALLED_APPS = (
'django_splitdate',
)
settings.configure(
SECRET_KEY="django_tests_secret_key",
DEBUG=False,
TEMPLATE_DEBUG=False,
ALLOWED_HOSTS=[],
INSTALLED_APPS=INSTALLED_APPS,
MIDDLEWARE_CLASSES=[],
ROOT_URLCONF='tests.urls',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
LANGUAGE_CODE='en-us',
TIME_ZONE='UTC',
USE_I18N=True,
USE_L10N=True,
USE_TZ=True,
STATIC_URL='/static/',
# Use a fast hasher to speed up tests.
PASSWORD_HASHERS=(
'django.contrib.auth.hashers.MD5PasswordHasher',
),
FIXTURE_DIRS=glob.glob(BASE_DIR + '/' + '*/fixtures/')
)
django.setup()
args = [sys.argv[0], 'test']
# Current module (``tests``) and its submodules.
test_cases = '.'
# Allow accessing test options from the command line.
offset = 1
try:
sys.argv[1]
except IndexError:
pass
else:
option = sys.argv[1].startswith('-')
if not option:
test_cases = sys.argv[1]
offset = 2
args.append(test_cases)
# ``verbosity`` can be overwritten from command line.
#args.append('--verbosity=2')
args.extend(sys.argv[offset:])
execute_from_command_line(args)
if cov is not None:
sys.stdout.write('Evaluating Coverage\n')
cov.stop()
cov.save()
sys.stdout.write('Generating HTML Report\n')
    cov.html_report()
| 25.271845 | 100 | 0.688436 | ["MIT"] | Mactory/django-splitdate | django_splitdate/tests/runtests.py | 2,603 | Python |
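# Invocation sketch for the test runner above (module paths are illustrative):
# the first positional argument selects the test cases (defaulting to '.'),
# and any remaining options are passed straight through to Django's test
# command, so verbosity can be overridden from the command line.
#
#   python runtests.py                            # run the whole suite
#   python runtests.py tests.test_fields -v 2     # one module, more verbosity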
#-------------------------------------------------------------------------------
#
# Base class for all door sensors
#
import iofun
import message
from device import Device
from querier import Querier
from querier import MsgHandler
from dbbuilder import GenericDBBuilder
from linkdb import LightDBRecordFormatter
from us.pfrommer.insteon.msg import InsteonAddress
class DefaultMsgHandler(MsgHandler):
label = None
def __init__(self, l):
self.label = l
def processMsg(self, msg):
iofun.out(self.label + " got msg: " + msg.toString())
return 1
# class StatusMsgHandler(MsgHandler):
# label = None
# def __init__(self, l):
# self.label = l
# def processMsg(self, msg):
# if (msg.getByte("command2") == 0xFF):
# iofun.out(" Status: Open")
# elif (msg.getByte("command2") == 0x00):
# iofun.out(" Status: Closed")
# return 1
class StatusMsgHandler(MsgHandler):
label = None
def __init__(self, l):
self.label = l
def processMsg(self, msg):
if msg.isExtended():
rawflags = msg.getByte("userData3") & 0xFF
flags = bin(msg.getByte("userData3") & 0xFF)[2:].zfill(8)
batterylevel = msg.getByte("userData4") & 0xFF
rawopenclosed = msg.getByte("userData5") & 0xFF
if (rawopenclosed == 0):
openclosed = "Open"
elif (rawopenclosed == 255):
openclosed = "Closed"
else:
openclosed = "Error reading status"
            rawheartbeatint = msg.getByte("userData6") & 0xFF ## heartbeat interval = this value * 5 minutes. 0x00 = 24 hours (default)
if (rawheartbeatint == 0):
heartbeatint = 24*60
else:
heartbeatint = rawheartbeatint * 5
lowbatterythreshold = msg.getByte("userData7") & 0xFF
# Bit 0
if (rawflags & 0b00000001 == 1):
cleanupreport = "Send Cleanup Report"
else:
cleanupreport = "Don't Send Cleanup Report"
# Bit 1
if (rawflags & 0b00000010 == 2):
twogroups = "Send Open on Group 1 ON and Closed on Group 2 ON"
else:
twogroups = "Send both Open and Closed on Group 1 (On=Open and Off=Closed)"
# Bit 2
if (rawflags & 0b00000100 == 4):
openrepeat = "Send Repeated Open Commands (Every 5 mins for 50 mins)"
else:
openrepeat = "Don't Send Repeated Open Commands"
# Bit 3
if (rawflags & 0b00001000 == 8):
closedrepeat = "Send Repeated Closed Commands (Every 5 mins for 50 mins)"
else:
closedrepeat = "Don't Send Repeated Closed Commands"
# Bit 4
if (rawflags & 0b00010000 == 16):
ffgrp = "Link to FF Group"
else:
ffgrp = "Don't link to FF Group"
# Bit 5
if (rawflags & 0b00100000 == 32):
ledonoff = "LED does not blink on transmission"
else:
ledonoff = "LED blinks on transmission"
# Bit 6
if (rawflags & 0b01000000 == 64):
noeffect = "No Effect"
else:
noeffect = "No Effect"
# Bit 7
if (rawflags & 0b10000000 == 128):
plock = "Programming lock on"
else:
plock = "Programming lock off"
iofun.out(self.label + " Battery level: " + format(batterylevel, 'd') + " Low Battery threshold: " + format(lowbatterythreshold, 'd'))
            iofun.out(" Sensor Status: " + openclosed)
            iofun.out(" Heartbeat Set Value: " + format(rawheartbeatint, 'd'))
            iofun.out(" Heartbeat Time: " + format(heartbeatint, 'd') + " minutes")
            iofun.out(" Configuration Byte (hex): " + format(rawflags, 'X'))
            iofun.out(" Configuration Byte (binary): " + flags)
iofun.out(" Bit 0: 1 = Send Cleanup Report, 0 = Don't Send Cleanup Report")
iofun.out(" Bit 1: 1 = Send Open on Group 1 ON / Closed on Group 2 ON, 0 = Send both Open and Closed on Group 1 (On=Open and Off=Closed)")
iofun.out(" Bit 2: 1 = Send Repeated Open Commands, 0 = Don't Send Repeated Open Commands")
iofun.out(" Bit 3: 1 = Send Repeated Closed Commands, 0 = Don't Send Repeated Closed Commands")
iofun.out(" Bit 4: 1 = Link to FF Group, 0 = Don't link to FF Group")
iofun.out(" Bit 5: 1 = LED does not blink on transmission, 0 = LED blinks on transmission")
iofun.out(" Bit 6: No Effect")
iofun.out(" Bit 7: 1 = Programming lock on, 0 = Programming Lock off")
iofun.out("\nCurrent Config Byte Setting:")
iofun.out("\n\t" + cleanupreport + "\n\t" + twogroups + "\n\t" + openrepeat + "\n\t"+ closedrepeat + "\n\t" + ffgrp + "\n\t" + ledonoff + "\n\t" + noeffect + "\n\t" + plock)
return 1
else:
iofun.out(self.label + " unexpected direct message: " + msg.toString())
return 0
class BatMsgHandler(MsgHandler):
label = None
def __init__(self, l):
self.label = l
def processMsg(self, msg):
battery = msg.getByte("command2") & 0xFF
iofun.out(" battery level: " + format(battery, 'd'))
return 1
class HiddenDoorSensor(Device):
"""============== Insteon Hidden Door Sensor ===============
NOTE: 1) The sensor must be awake in order for you to read/write data from/to it
2) Press and hold the link button to put it into Link mode. This is the best way to ensure it is awake
3) Use modem.startWatch() / modem.stopWatch() to see incoming messages
"""
def __init__(self, name, addr):
Device.__init__(self, name, addr)
self.dbbuilder = GenericDBBuilder(addr, self.db)
self.db.setRecordFormatter(LightDBRecordFormatter())
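    # Typical interactive use (a sketch; the console/modem objects follow the
    # insteon-terminal conventions this module assumes, and the address is a
    # placeholder):
    #   hds = HiddenDoorSensor("front_door", "11.22.33")
    #   modem.startWatch()        # watch incoming messages
    #   # press and hold the sensor's link button so it is awake, then:
    #   hds.getStatus()
    #   hds.getBatLevel()
    #   modem.stopWatch()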
# def getStatus(self):
# """getStatus()"""
# self.querier.setMsgHandler(DefaultMsgHandler("status"))
# return self.querier.queryext(0x19, 0x00, [0,0,0])
def getStatus(self):
"""getStatus()
        Reads and displays all of the device settings as well as current open/closed position"""
self.querier.setMsgHandler(StatusMsgHandler("\nHidden Door Sensor Status and Settings\n"))
return self.querier.queryext(0x2E, 0x00, [0x01, 0x00, 0x00])
def getBatLevel(self):
"""getBatLevel()
        Reports battery level as a decimal number [61=~1.75v 54=~1.6 51=~1.5 40=~1.25]"""
self.querier.setMsgHandler(BatMsgHandler("Get Bat Level"))
return self.querier.queryext(0x19, 0x01, [0,0,0])
def getFlags(self):
"""getFlags()
Reads and displays operating flags"""
iofun.writeMsg(message.createStdMsg(InsteonAddress(self.getAddress()), 0x0F, 0x1F, 0x00, -1))
def getDDBCount(self):
"""getDDBCount()
        Database Delta flag gets incremented with any change in the database."""
iofun.writeMsg(message.createStdMsg(InsteonAddress(self.getAddress()), 0x0F, 0x1F, 0x01, -1))
def setPLOn(self):
"""setPLOn()
This enables the Local Programming Lock - No Press and Hold Linking"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Programming Lock ON"))
return self.querier.queryext(0x20, 0x00, [0x00, 0x00, 0x00]);
def setPLOff(self):
"""setPLOff()
This disables the Local Programming Lock - Allows Press and Hold Linking"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Programming Lock OFF"))
return self.querier.queryext(0x20, 0x01, [0x00, 0x00, 0x00]);
def setLEDOff(self):
"""setLEDOff()
This disables the LED blink during transmission"""
self.querier.setMsgHandler(DefaultMsgHandler("Set LED OFF"))
return self.querier.queryext(0x20, 0x02, [0x00, 0x00, 0x00]);
def setLEDOn(self):
"""setLEDOn()
This enables the LED blink during transmission"""
self.querier.setMsgHandler(DefaultMsgHandler("Set LED ON"))
return self.querier.queryext(0x20, 0x03, [0x00, 0x00, 0x00]);
def setTwoGroupsOn(self):
"""setTwoGroupsOn()
This makes the HDS send an ON to group 1 for Open and an ON to group 2 for closed."""
self.querier.setMsgHandler(DefaultMsgHandler("Set Two Groups ON"))
return self.querier.queryext(0x20, 0x04, [0x00, 0x00, 0x00]);
def setTwoGroupsOff(self):
"""setTwoGroupsOff()
		This makes the HDS send an ON to group 1 for open and an OFF to group 1 for closed"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Two Groups Off"))
return self.querier.queryext(0x20, 0x05, [0x00, 0x00, 0x00]);
def setLinkToAllGrpsOn(self):
"""setLinkToAllGrpsOn()
This links the HDS to all groups (Group 0xFF)"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Link to FF"))
return self.querier.queryext(0x20, 0x06, [0x00, 0x00, 0x00]);
def setLinkToAllGrpsOff(self):
"""setLinkToAllGrpsOff()
This removes the link to all groups (0xFF)"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Link to FF off"))
return self.querier.queryext(0x20, 0x07, [0x00, 0x00, 0x00]);
def setCloseRepeatOn(self):
"""setCloseRepeatOn()
This sets the HDS to send repeat closed commands every 5 mins for 50 mins"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Close Repeat ON"))
return self.querier.queryext(0x20, 0x08, [0x00, 0x00, 0x00]);
def setCloseRepeatOff(self):
"""setCloseRepeatOff()
This stops the HDS from sending repeat closed commands every 5 mins for 50 mins"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Close Repeat OFF"))
return self.querier.queryext(0x20, 0x09, [0x00, 0x00, 0x00]);
def setOpenRepeatOn(self):
"""setOpenRepeatOn()
This sets the HDS to send repeat open commands every 5 mins for 50 mins"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Open Repeat ON"))
return self.querier.queryext(0x20, 0x0A, [0x00, 0x00, 0x00]);
def setOpenRepeatOff(self):
"""setOpenRepeatOff()
This stops the HDS from sending repeat open commands every 5 mins for 50 mins"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Open Repeat OFF"))
return self.querier.queryext(0x20, 0x0B, [0x00, 0x00, 0x00]);
def setCleanupReportOff(self):
"""setCleanupReportOff()
This prevents the HDS from sending a cleanup report after changes in status"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Cleanup Report Off\n"))
return self.querier.queryext(0x20, 0x16, [0x00, 0x00, 0x00]);
def setCleanupReportOn(self):
"""setCleanupReportOn()
This allows the HDS to send a cleanup report after changes in status"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Cleanup Report On\n"))
return self.querier.queryext(0x20, 0x17, [0x00, 0x00, 0x00]);
def setHBInterval(self, level):
"""setHBInterval(level)
This sets the heartbeat interval in 5 minute increments. Value (0-255) x 5mins (0 = 24 hours)"""
self.querier.setMsgHandler(DefaultMsgHandler("Set Heartbeat Interval"))
return self.querier.queryext(0x2E, 0x00, [0x01, 0x02, level]);
def setLowBatLevel(self, level):
"""setLowBatLevel(level)
		This sets the point where the HDS sends an ON command to Group 3 to indicate low battery. Value (0-255)"""
		self.querier.setMsgHandler(DefaultMsgHandler("Set Low Battery Level"))
return self.querier.queryext(0x2E, 0x00, [0x01, 0x03, level]);
| 39.943609 | 176 | 0.686118 | [
"Unlicense"
] | vastsuperking/insteon-terminal | python/hiddendoorsensor.py | 10,625 | Python |
# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.
import torch
import matplotlib.pyplot as plt
from lib.pylabyk import numpytorch as npt
from lib.pylabyk.numpytorch import npy, npys
def print_demo(p, fun):
out = fun(p)
print('-----')
print('fun: %s' % fun.__name__)
print('p:')
print(p)
print('out[0]:')
print(out[0])
print('out[1]:')
print(out[1])
print('out[0].sum(), out[1].sum()')
print(out[0].sum(), out[1].sum())
if __name__ == '__main__':
for p, fun in [
(torch.tensor([
[0., 1.],
[0.5, 0.5]
]) * 1., npt.min_distrib),
(torch.tensor([
[1., 0.],
[0.5, 0.5]
]) * 1., npt.min_distrib),
(torch.tensor([
[0.5, 0.5],
[0.5, 0.5]
]) * 0.1, npt.min_distrib),
(torch.tensor([
[0., 1.],
[0.5, 0.5]
]) * 1., npt.max_distrib),
(torch.tensor([
[1., 0.],
[0.5, 0.5]
]) * 1., npt.max_distrib),
(torch.tensor([
[0.5, 0.5],
[0.5, 0.5]
]) * 0.1, npt.max_distrib),
]:
print_demo(p, fun)
| 21.909091 | 71 | 0.443983 | [
"Apache-2.0"
] | Gravifer/pylabyk | demo/demo_min_max_distrib.py | 1,205 | Python |
def onehot_encode_seq(sequence, m=0, padding=False):
"""Converts a given IUPAC DNA sequence to a one-hot
encoded DNA sequence.
"""
    import sys
    import numpy as np
    import torch
valid_keys = ['a','c','g','t','u','n','r','y','s','w','k','m']
nucs = {'a':0,'c':1,'g':2,'t':3,'u':3}
if padding:
assert m != 0, "If using padding, m should be bigger than 0"
padding_mat = np.tile(0.25,(m-1,4))
onehot = np.tile(.0,(len(sequence),4))
for i,char in enumerate(sequence.lower()):
if char not in valid_keys:
sys.exit("invalid char in sequence (choose from acgt and nryswkm)")
elif char == 'n':
onehot[i,:] = 0.25
elif char == 'r':
onehot[i,(0,2)] = 0.5
elif char == 'y':
onehot[i,(1,3)] = 0.5
elif char == 's':
onehot[i,(1,2)] = 0.5
elif char == 'w':
onehot[i,(0,3)] = 0.5
elif char == 'k':
onehot[i,(2,3)] = 0.5
elif char == 'm':
onehot[i,(0,1)] = 0.5
else:
onehot[i,nucs[char]] = 1
if padding:
onehot = np.concatenate((padding_mat, onehot, padding_mat))
return onehot
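# Illustrative examples (not part of the original module):
#   onehot_encode_seq("acgt")                    -> 4x4 array with a single 1 per row
#   onehot_encode_seq("acgt", m=5, padding=True) -> adds (m-1)=4 rows of 0.25 padding on each side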
def save_meme(motifs_ppm_dict, output_file="found_motifs.meme"):
"""Saves the found PPMs (given as dictionary) to a file that's
compatible with MEME suite applications.
"""
import pandas as pd
meme_string = ["MEME version 4", "", "ALPHABET= ACGT", "", "strands: + -", ""]
for idx,key in enumerate(motifs_ppm_dict.keys()):
curr_motif = pd.DataFrame(motifs_ppm_dict[key])
s1 = "MOTIF " + str(key)
s2 = "letter-probability matrix: alength= " + str(curr_motif.shape[1]) + " w= " + str(curr_motif.shape[0])
s3 = curr_motif.to_csv(sep="\t", index=False, header=False)
meme_string = meme_string + [s1, s2, s3]
meme_string = "\n".join(meme_string)
with open(output_file, 'w') as the_file:
the_file.write(meme_string)
print("wrote meme list")
def align_conv_filters(model, input_seqs, m, train_ind):
"""Aligns the convolutional filters of a given scover model back
to the given input sequences at the given indices.
"""
# Motif analysis
import numpy as np
import torch
from tqdm import trange
activation_seqs = input_seqs[train_ind]
with torch.no_grad():
model.eval()
activations = model.conv_1(activation_seqs).cpu().detach().numpy().squeeze()
n_seq = activation_seqs.shape[0]
activation_seqs = activation_seqs.squeeze()
seq_len = activation_seqs.shape[1]
d = activations.shape[1]
motifs_pfm_dict = dict() # store pfms in this dict
    motifs_ppm_dict = dict() # store ppms in this dict
# cycle through convolutional filters
for filter_num in trange(d):
# select activations for filter. new array = nseq x length seq
curr_activation = activations[:,filter_num,:]
# get those sequences that have positive values
seq_has_pos_vals = np.argwhere(np.amax(curr_activation, axis=1) > 0)[:,0]
        # only proceed when more than 10 sequences activate the filter
if seq_has_pos_vals.shape[0] > 10:
# per sequence, get position of maximum activation
per_seq_where_max_pos = np.argmax(curr_activation[seq_has_pos_vals], axis=1)
curr_activation_seqs = activation_seqs[seq_has_pos_vals]
curr_str_list = []
# go through sequences and save to curr_str_list
for i in range(seq_has_pos_vals.shape[0]):
# maximum activation
curr_max = per_seq_where_max_pos[i]
# get subsequence that activated filter (max 1 per seq)
curr_str_list.append(curr_activation_seqs[i][curr_max:(curr_max+m)])
# put them together in a numpy array
sequence_array = np.stack(curr_str_list)
# get sum per position
sequence_array_summed = np.sum(sequence_array,axis=0)
# save pfm
motifs_pfm_dict[str(filter_num)] = sequence_array_summed
# get counts per row
row_sums = np.sum(sequence_array_summed, axis=1)
# convert pfm to ppm
sequence_array_summed = np.nan_to_num(sequence_array_summed / row_sums[:, np.newaxis])
motifs_ppm_dict[str(filter_num)] = sequence_array_summed
return motifs_pfm_dict, motifs_ppm_dict
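# Sketch of a typical call (variable names here are assumptions, not part of this module):
#   pfms, ppms = align_conv_filters(model, input_seqs, m=12, train_ind=train_idx)
#   save_meme(ppms, "found_motifs.meme")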
def randomize_sequences(sequences):
"""Randomly permutes a set of DNA sequences.
"""
import random
shuffled_seqs = []
for seq in sequences:
shuffled_seqs.append(''.join(random.sample(seq, len(seq))))
return shuffled_seqs
| 36.571429 | 114 | 0.596217 | [
"MIT"
] | jacobhepkema/scover | bin/scover_utils.py | 4,864 | Python |
from __future__ import unicode_literals
from django.apps import AppConfig
class CondominiosConfig(AppConfig):
name = 'condominios'
| 17.25 | 39 | 0.804348 | [
"MIT"
] | mpeyrotc/govector | condominios/apps.py | 138 | Python |
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.wi.utils
@author Piotr Wójcik
@date 24.03.2011
"""
import logging
import os
from time import time
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from common.utils import ServerProxy
from wi.utils.exceptions import RestErrorException
from wi.utils.messages_ajax import error, success
from wi.utils.messages_codes import get_error, auth_error_text
REDIRECT_FIELD_NAME = 'next'
CLM = ServerProxy(settings.CLOUD_MANAGER_ADDRESS)
def check_response_errors(response, session):
"""
    Checks the status of the response and raises the appropriate error.
"""
if response['status'] != 'ok':
from wi.utils.auth import logout
error_code = response['status']
error_msg = get_error(error_code)
raise RestErrorException(error_msg)
return response
def get_dict_from_list(list_of_dicts, key_value, key='id'):
"""
Returns dictionary with key: @prm{key} equal to @prm{key_value} from a
list of dictionaries: @prm{list_of_dicts}.
"""
for dictionary in list_of_dicts:
        if dictionary.get(key) is None:
raise Exception("No key: " + key + " in dictionary.")
if dictionary.get(key) == key_value:
return dictionary
return None
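# Example (illustrative): get_dict_from_list([{"id": 1}, {"id": 2}], 2) returns {"id": 2}.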
def get_dicts_from_list(list_of_dicts, list_of_key_values, key='id'):
"""
Returns list of dictionaries with keys: @prm{key} equal to one from list
@prm{list_of_key_values} from a list of dictionaries: @prm{list_of_dicts}.
"""
ret = []
for dictionary in list_of_dicts:
        if dictionary.get(key) is None:
raise Exception("No key: " + key + " in dictionary.")
if dictionary.get(key) in list_of_key_values:
ret.append(dictionary)
return ret
| 30.268293 | 78 | 0.705077 | [
"Apache-2.0"
] | cc1-cloud/cc1 | src/wi/utils/__init__.py | 2,483 | Python |
import connexion
from flask_cors import CORS
import api
from flask import request
import sys
def create_app():
app = connexion.FlaskApp(__name__, specification_dir='openapi/')
app.add_api('my_api.yaml')
@app.app.route("/v1/plugin/<name>/<path:path>", methods=["GET", "POST"])
def plugin(name, path):
if request.method == "GET":
return api.get_plugin(name, path, request.headers, kwargs=request.args.to_dict())
elif request.method == "POST":
return api.post_plugin(name, path, request.headers, request.get_json(), kwargs=request.args.to_dict())
else:
raise RuntimeError("unsupported method " + request.method)
CORS(app.app)
return app
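# Minimal local-run sketch (port is an assumption, not defined by this module):
#   app = create_app()
#   app.run(port=8080)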
| 29.44 | 114 | 0.653533 | [
"MIT"
] | xu-hao/pds-backend | api/server.py | 736 | Python |
import base64
import typing
import uuid
from urllib.parse import parse_qs
from commercetools.testing.abstract import BaseBackend
from commercetools.testing.utils import create_commercetools_response
class AuthModel:
def __init__(self):
self.tokens: typing.List[str] = []
def add_token(self, token):
self.tokens.append(token)
def is_valid(self, client_id, client_secret):
return True
class AuthBackend(BaseBackend):
path_prefix = r"/oauth/(?P<path>.*)"
hostnames = ["auth.sphere.io", "localhost"]
model_class = AuthModel
def __init__(self, *args, **kwargs):
self._expire_time = 172800
super().__init__()
def set_expire_time(self, value):
self._expire_time = value
@property
def url_prefix(self):
return r"/oauth/(?P<path>.*)"
def urls(self):
return [("token", "POST", self.token), ("introspect", "POST", self.introspect)]
def _get_api_client_credentials(
self, request
) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
params = parse_qs(request.body)
client_id = None
client_secret = None
if request.headers.get("Authorization"):
auth_type, auth_info = request.headers["Authorization"].split()
if auth_type == "Basic":
                client_id, client_secret = base64.b64decode(auth_info).decode().split(":")
elif params.get("client_id") and params.get("client_secret"):
client_id = params.get("client_id")
client_secret = params.get("client_secret")
return client_id, client_secret
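    # Credentials may arrive either via an "Authorization: Basic ..." header or as
    # client_id/client_secret form fields; the helper above normalises both cases.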
def token(self, request):
client_id, client_secret = self._get_api_client_credentials(request)
if not client_id or not client_secret:
response = create_commercetools_response(request, status_code=401)
return response
if self.model.is_valid(client_id, client_secret):
params = parse_qs(request.body)
scope = params.get("scope", "manage_project:todo")
token = {
"access_token": str(uuid.uuid4()),
"expires_in": self._expire_time,
"scope": scope,
"token_type": "Bearer",
}
self.model.add_token(token)
response = create_commercetools_response(request, json=token)
return response
def introspect(self, request):
client_id, client_secret = self._get_api_client_credentials(request)
if not client_id or not client_secret:
response = create_commercetools_response(request, status_code=401)
return response
if self.model.is_valid(client_id, client_secret):
token = request.qs.get("token", [None])[0]
stored_tokens = [
token_object.get("access_token") for token_object in self.model.tokens
]
if token in stored_tokens:
status = {
"active": True,
"scope": "manage_project:todo",
"exp": self._expire_time,
}
else:
status = {"active": False}
response = create_commercetools_response(request, json=status)
return response
| 34 | 87 | 0.61037 | [
"MIT"
] | BramKaashoek/commercetools-python-sdk | src/commercetools/testing/auth.py | 3,298 | Python |
# -*- coding: utf-8 -*-
# Copyright 2010-2020, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Change the reference to frameworks.
Typical usage:
% change_reference_mac.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/MacOS/target
"""
__author__ = "horo"
import optparse
import os
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
def GetFrameworkPath(name, version):
return '%s.framework/Versions/%s/%s' % (name, version, name)
def GetReferenceTo(framework):
return ('@executable_path/../../../ConfigDialog.app/Contents/Frameworks/%s' %
framework)
def InstallNameTool(target, reference_from, reference_to):
cmd = ['install_name_tool', '-change', reference_from, reference_to, target]
RunOrDie(cmd)
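# The command above expands to, e.g. (paths are illustrative):
#   install_name_tool -change @rpath/QtCore.framework/Versions/5/QtCore \
#     @executable_path/../../../ConfigDialog.app/Contents/Frameworks/QtCore.framework/Versions/5/QtCore \
#     /path/to/target.app/Contents/MacOS/target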
def main():
opt = ParseOption()
if not opt.qtdir:
PrintErrorAndExit('--qtdir option is mandatory.')
if not opt.target:
PrintErrorAndExit('--target option is mandatory.')
unused_qtdir = os.path.abspath(opt.qtdir) # TODO(komatsu): remove this.
target = os.path.abspath(opt.target)
# Changes the reference to QtCore framework from the target application
# From: @rpath/QtCore.framework/Versions/5/QtCore
# To: @executable_path/../../../MozcTool.app/Contents/Frameworks/...
qtcore_framework = GetFrameworkPath('QtCore', '5')
InstallNameTool(target,
'@rpath/%s' % qtcore_framework,
GetReferenceTo(qtcore_framework))
# Changes the reference to QtGui framework from the target application
qtgui_framework = GetFrameworkPath('QtGui', '5')
InstallNameTool(target,
'@rpath/%s' % qtgui_framework,
GetReferenceTo(qtgui_framework))
# Changes the reference to QtWidgets framework from the target application
qtwidgets_framework = GetFrameworkPath('QtWidgets', '5')
InstallNameTool(target,
'@rpath/%s' % qtwidgets_framework,
GetReferenceTo(qtwidgets_framework))
# Change the reference to $(branding)Tool_lib from the target application
# From: @executable_path/../Frameworks/MozcTool_lib.framework/...
# To: @executable_path/../../../ConfigDialog.app/Contents/Frameworks/...
toollib_framework = GetFrameworkPath('GuiTool_lib', 'A')
InstallNameTool(target,
'@executable_path/../Frameworks/%s' % toollib_framework,
GetReferenceTo(toollib_framework))
if __name__ == '__main__':
main()
| 35.732759 | 79 | 0.723281 | [
"BSD-3-Clause"
] | dancerj/mozc | src/build_tools/change_reference_mac.py | 4,145 | Python |
number1 = 10
| 6.5 | 12 | 0.692308 | [
"MIT"
] | wang40290059/TBD39 | login.py | 13 | Python |
import torch
def binary_accuracy(preds, y):
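    """Mean accuracy for binary classification: `preds` are raw logits, `y` holds 0/1 labels."""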
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc | 21.888889 | 53 | 0.675127 | [
"MIT"
] | gucci-j/pytorch-imdb-cv | src/metrics.py | 197 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: users.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
import include.common_pb2 as common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users.proto',
package='tinkoff.public.invest.api.contract.v1',
syntax='proto3',
serialized_options=b'\n\034ru.tinkoff.piapi.contract.v1P\001Z\021Tinkoff/investAPI\242\002\005TIAPI\252\002\024Tinkoff.InvestAPI.V1\312\002\021Tinkoff\\Invest\\V1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0busers.proto\x12%tinkoff.public.invest.api.contract.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0c\x63ommon.proto\"\x14\n\x12GetAccountsRequest\"W\n\x13GetAccountsResponse\x12@\n\x08\x61\x63\x63ounts\x18\x01 \x03(\x0b\x32..tinkoff.public.invest.api.contract.v1.Account\"\x8d\x02\n\x07\x41\x63\x63ount\x12\n\n\x02id\x18\x01 \x01(\t\x12@\n\x04type\x18\x02 \x01(\x0e\x32\x32.tinkoff.public.invest.api.contract.v1.AccountType\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x44\n\x06status\x18\x04 \x01(\x0e\x32\x34.tinkoff.public.invest.api.contract.v1.AccountStatus\x12/\n\x0bopened_date\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63losed_date\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"0\n\x1aGetMarginAttributesRequest\x12\x12\n\naccount_id\x18\x01 \x01(\t\"\xa8\x03\n\x1bGetMarginAttributesResponse\x12K\n\x10liquid_portfolio\x18\x01 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12J\n\x0fstarting_margin\x18\x02 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12I\n\x0eminimal_margin\x18\x03 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\x12Q\n\x17\x66unds_sufficiency_level\x18\x04 \x01(\x0b\x32\x30.tinkoff.public.invest.api.contract.v1.Quotation\x12R\n\x17\x61mount_of_missing_funds\x18\x05 \x01(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.MoneyValue\"\x16\n\x14GetUserTariffRequest\"\xab\x01\n\x15GetUserTariffResponse\x12G\n\x0cunary_limits\x18\x01 \x03(\x0b\x32\x31.tinkoff.public.invest.api.contract.v1.UnaryLimit\x12I\n\rstream_limits\x18\x02 \x03(\x0b\x32\x32.tinkoff.public.invest.api.contract.v1.StreamLimit\"7\n\nUnaryLimit\x12\x18\n\x10limit_per_minute\x18\x01 \x01(\x05\x12\x0f\n\x07methods\x18\x02 \x03(\t\"-\n\x0bStreamLimit\x12\r\n\x05limit\x18\x01 \x01(\x05\x12\x0f\n\x07streams\x18\x02 \x03(\t\"\x10\n\x0eGetInfoRequest\"\\\n\x0fGetInfoResponse\x12\x13\n\x0bprem_status\x18\x01 \x01(\x08\x12\x13\n\x0bqual_status\x18\x02 \x01(\x08\x12\x1f\n\x17qualified_for_work_with\x18\x03 \x03(\t*\x80\x01\n\x0b\x41\x63\x63ountType\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_UNSPECIFIED\x10\x00\x12\x18\n\x14\x41\x43\x43OUNT_TYPE_TINKOFF\x10\x01\x12\x1c\n\x18\x41\x43\x43OUNT_TYPE_TINKOFF_IIS\x10\x02\x12\x1b\n\x17\x41\x43\x43OUNT_TYPE_INVEST_BOX\x10\x03*{\n\rAccountStatus\x12\x1e\n\x1a\x41\x43\x43OUNT_STATUS_UNSPECIFIED\x10\x00\x12\x16\n\x12\x41\x43\x43OUNT_STATUS_NEW\x10\x01\x12\x17\n\x13\x41\x43\x43OUNT_STATUS_OPEN\x10\x02\x12\x19\n\x15\x41\x43\x43OUNT_STATUS_CLOSED\x10\x03\x32\xbb\x04\n\x0cUsersService\x12\x84\x01\n\x0bGetAccounts\x12\x39.tinkoff.public.invest.api.contract.v1.GetAccountsRequest\x1a:.tinkoff.public.invest.api.contract.v1.GetAccountsResponse\x12\x9c\x01\n\x13GetMarginAttributes\x12\x41.tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest\x1a\x42.tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse\x12\x8a\x01\n\rGetUserTariff\x12;.tinkoff.public.invest.api.contract.v1.GetUserTariffRequest\x1a<.tinkoff.public.invest.api.contract.v1.GetUserTariffResponse\x12x\n\x07GetInfo\x12\x35.tinkoff.public.invest.api.contract.v1.GetInfoRequest\x1a\x36.tinkoff.public.invest.api.contract.v1.GetInfoResponseBf\n\x1cru.tinkoff.piapi.contract.v1P\x01Z\x11Tinkoff/investAPI\xa2\x02\x05TIAPI\xaa\x02\x14Tinkoff.InvestAPI.V1\xca\x02\x11Tinkoff\\Invest\\V1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,common__pb2.DESCRIPTOR,])
_ACCOUNTTYPE = _descriptor.EnumDescriptor(
name='AccountType',
full_name='tinkoff.public.invest.api.contract.v1.AccountType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_TINKOFF', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_TINKOFF_IIS', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_TYPE_INVEST_BOX', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1376,
serialized_end=1504,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTTYPE)
AccountType = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTTYPE)
_ACCOUNTSTATUS = _descriptor.EnumDescriptor(
name='AccountStatus',
full_name='tinkoff.public.invest.api.contract.v1.AccountStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_NEW', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_OPEN', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ACCOUNT_STATUS_CLOSED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1506,
serialized_end=1629,
)
_sym_db.RegisterEnumDescriptor(_ACCOUNTSTATUS)
AccountStatus = enum_type_wrapper.EnumTypeWrapper(_ACCOUNTSTATUS)
ACCOUNT_TYPE_UNSPECIFIED = 0
ACCOUNT_TYPE_TINKOFF = 1
ACCOUNT_TYPE_TINKOFF_IIS = 2
ACCOUNT_TYPE_INVEST_BOX = 3
ACCOUNT_STATUS_UNSPECIFIED = 0
ACCOUNT_STATUS_NEW = 1
ACCOUNT_STATUS_OPEN = 2
ACCOUNT_STATUS_CLOSED = 3
_GETACCOUNTSREQUEST = _descriptor.Descriptor(
name='GetAccountsRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=121,
)
_GETACCOUNTSRESPONSE = _descriptor.Descriptor(
name='GetAccountsResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='accounts', full_name='tinkoff.public.invest.api.contract.v1.GetAccountsResponse.accounts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=210,
)
_ACCOUNT = _descriptor.Descriptor(
name='Account',
full_name='tinkoff.public.invest.api.contract.v1.Account',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tinkoff.public.invest.api.contract.v1.Account.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='tinkoff.public.invest.api.contract.v1.Account.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='tinkoff.public.invest.api.contract.v1.Account.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='tinkoff.public.invest.api.contract.v1.Account.status', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='opened_date', full_name='tinkoff.public.invest.api.contract.v1.Account.opened_date', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='closed_date', full_name='tinkoff.public.invest.api.contract.v1.Account.closed_date', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=213,
serialized_end=482,
)
_GETMARGINATTRIBUTESREQUEST = _descriptor.Descriptor(
name='GetMarginAttributesRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='account_id', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest.account_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=484,
serialized_end=532,
)
_GETMARGINATTRIBUTESRESPONSE = _descriptor.Descriptor(
name='GetMarginAttributesResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='liquid_portfolio', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.liquid_portfolio', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='starting_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.starting_margin', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='minimal_margin', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.minimal_margin', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='funds_sufficiency_level', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.funds_sufficiency_level', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount_of_missing_funds', full_name='tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse.amount_of_missing_funds', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=959,
)
_GETUSERTARIFFREQUEST = _descriptor.Descriptor(
name='GetUserTariffRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=961,
serialized_end=983,
)
_GETUSERTARIFFRESPONSE = _descriptor.Descriptor(
name='GetUserTariffResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='unary_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.unary_limits', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stream_limits', full_name='tinkoff.public.invest.api.contract.v1.GetUserTariffResponse.stream_limits', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=986,
serialized_end=1157,
)
_UNARYLIMIT = _descriptor.Descriptor(
name='UnaryLimit',
full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit_per_minute', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.limit_per_minute', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='methods', full_name='tinkoff.public.invest.api.contract.v1.UnaryLimit.methods', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1159,
serialized_end=1214,
)
_STREAMLIMIT = _descriptor.Descriptor(
name='StreamLimit',
full_name='tinkoff.public.invest.api.contract.v1.StreamLimit',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='limit', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.limit', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='streams', full_name='tinkoff.public.invest.api.contract.v1.StreamLimit.streams', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1216,
serialized_end=1261,
)
_GETINFOREQUEST = _descriptor.Descriptor(
name='GetInfoRequest',
full_name='tinkoff.public.invest.api.contract.v1.GetInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1263,
serialized_end=1279,
)
_GETINFORESPONSE = _descriptor.Descriptor(
name='GetInfoResponse',
full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='prem_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.prem_status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qual_status', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qual_status', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qualified_for_work_with', full_name='tinkoff.public.invest.api.contract.v1.GetInfoResponse.qualified_for_work_with', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1281,
serialized_end=1373,
)
_GETACCOUNTSRESPONSE.fields_by_name['accounts'].message_type = _ACCOUNT
_ACCOUNT.fields_by_name['type'].enum_type = _ACCOUNTTYPE
_ACCOUNT.fields_by_name['status'].enum_type = _ACCOUNTSTATUS
_ACCOUNT.fields_by_name['opened_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ACCOUNT.fields_by_name['closed_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['liquid_portfolio'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['starting_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['minimal_margin'].message_type = common__pb2._MONEYVALUE
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['funds_sufficiency_level'].message_type = common__pb2._QUOTATION
_GETMARGINATTRIBUTESRESPONSE.fields_by_name['amount_of_missing_funds'].message_type = common__pb2._MONEYVALUE
_GETUSERTARIFFRESPONSE.fields_by_name['unary_limits'].message_type = _UNARYLIMIT
_GETUSERTARIFFRESPONSE.fields_by_name['stream_limits'].message_type = _STREAMLIMIT
DESCRIPTOR.message_types_by_name['GetAccountsRequest'] = _GETACCOUNTSREQUEST
DESCRIPTOR.message_types_by_name['GetAccountsResponse'] = _GETACCOUNTSRESPONSE
DESCRIPTOR.message_types_by_name['Account'] = _ACCOUNT
DESCRIPTOR.message_types_by_name['GetMarginAttributesRequest'] = _GETMARGINATTRIBUTESREQUEST
DESCRIPTOR.message_types_by_name['GetMarginAttributesResponse'] = _GETMARGINATTRIBUTESRESPONSE
DESCRIPTOR.message_types_by_name['GetUserTariffRequest'] = _GETUSERTARIFFREQUEST
DESCRIPTOR.message_types_by_name['GetUserTariffResponse'] = _GETUSERTARIFFRESPONSE
DESCRIPTOR.message_types_by_name['UnaryLimit'] = _UNARYLIMIT
DESCRIPTOR.message_types_by_name['StreamLimit'] = _STREAMLIMIT
DESCRIPTOR.message_types_by_name['GetInfoRequest'] = _GETINFOREQUEST
DESCRIPTOR.message_types_by_name['GetInfoResponse'] = _GETINFORESPONSE
DESCRIPTOR.enum_types_by_name['AccountType'] = _ACCOUNTTYPE
DESCRIPTOR.enum_types_by_name['AccountStatus'] = _ACCOUNTSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAccountsRequest = _reflection.GeneratedProtocolMessageType('GetAccountsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTSREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsRequest)
})
_sym_db.RegisterMessage(GetAccountsRequest)
GetAccountsResponse = _reflection.GeneratedProtocolMessageType('GetAccountsResponse', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTSRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetAccountsResponse)
})
_sym_db.RegisterMessage(GetAccountsResponse)
Account = _reflection.GeneratedProtocolMessageType('Account', (_message.Message,), {
'DESCRIPTOR' : _ACCOUNT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.Account)
})
_sym_db.RegisterMessage(Account)
GetMarginAttributesRequest = _reflection.GeneratedProtocolMessageType('GetMarginAttributesRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMARGINATTRIBUTESREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesRequest)
})
_sym_db.RegisterMessage(GetMarginAttributesRequest)
GetMarginAttributesResponse = _reflection.GeneratedProtocolMessageType('GetMarginAttributesResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMARGINATTRIBUTESRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetMarginAttributesResponse)
})
_sym_db.RegisterMessage(GetMarginAttributesResponse)
GetUserTariffRequest = _reflection.GeneratedProtocolMessageType('GetUserTariffRequest', (_message.Message,), {
'DESCRIPTOR' : _GETUSERTARIFFREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffRequest)
})
_sym_db.RegisterMessage(GetUserTariffRequest)
GetUserTariffResponse = _reflection.GeneratedProtocolMessageType('GetUserTariffResponse', (_message.Message,), {
'DESCRIPTOR' : _GETUSERTARIFFRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetUserTariffResponse)
})
_sym_db.RegisterMessage(GetUserTariffResponse)
UnaryLimit = _reflection.GeneratedProtocolMessageType('UnaryLimit', (_message.Message,), {
'DESCRIPTOR' : _UNARYLIMIT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.UnaryLimit)
})
_sym_db.RegisterMessage(UnaryLimit)
StreamLimit = _reflection.GeneratedProtocolMessageType('StreamLimit', (_message.Message,), {
'DESCRIPTOR' : _STREAMLIMIT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.StreamLimit)
})
_sym_db.RegisterMessage(StreamLimit)
GetInfoRequest = _reflection.GeneratedProtocolMessageType('GetInfoRequest', (_message.Message,), {
'DESCRIPTOR' : _GETINFOREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoRequest)
})
_sym_db.RegisterMessage(GetInfoRequest)
GetInfoResponse = _reflection.GeneratedProtocolMessageType('GetInfoResponse', (_message.Message,), {
'DESCRIPTOR' : _GETINFORESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:tinkoff.public.invest.api.contract.v1.GetInfoResponse)
})
_sym_db.RegisterMessage(GetInfoResponse)
DESCRIPTOR._options = None
_USERSSERVICE = _descriptor.ServiceDescriptor(
name='UsersService',
full_name='tinkoff.public.invest.api.contract.v1.UsersService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1632,
serialized_end=2203,
methods=[
_descriptor.MethodDescriptor(
name='GetAccounts',
full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetAccounts',
index=0,
containing_service=None,
input_type=_GETACCOUNTSREQUEST,
output_type=_GETACCOUNTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetMarginAttributes',
full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetMarginAttributes',
index=1,
containing_service=None,
input_type=_GETMARGINATTRIBUTESREQUEST,
output_type=_GETMARGINATTRIBUTESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetUserTariff',
full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetUserTariff',
index=2,
containing_service=None,
input_type=_GETUSERTARIFFREQUEST,
output_type=_GETUSERTARIFFRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetInfo',
full_name='tinkoff.public.invest.api.contract.v1.UsersService.GetInfo',
index=3,
containing_service=None,
input_type=_GETINFOREQUEST,
output_type=_GETINFORESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_USERSSERVICE)
DESCRIPTOR.services_by_name['UsersService'] = _USERSSERVICE
# @@protoc_insertion_point(module_scope)
| 42.347084 | 3,323 | 0.777158 | [
"MIT"
] | HappyManRus/TinkoffNewAPI | include/users_pb2.py | 29,770 | Python |
from rest_framework import serializers
from vianeyRest.models import Usuario,Materia,Persona
class UsuarioSerializer(serializers.ModelSerializer):
class Meta:
model = Usuario
fields = ('id','nombreUsuario','contrasenaUsuario')
class MateriaSerializer(serializers.ModelSerializer):
class Meta:
model = Materia
fields = ('id','nombreMateria','primerParcialMateria',
'segundoParcialMateria','tercerParcialMateria',
'ordinarioMateria')
class PersonaSerializer(serializers.ModelSerializer):
class Meta:
model = Persona
fields = ('id','nombrePersona','apeliidoPPersona',
'apellidoMPersona','licenciaturaPersona',
'semestrePersona') | 29.423077 | 65 | 0.669281 | [
"Unlicense"
] | elgs1995/Servicio-Web- | vianeyRest/serializers.py | 765 | Python |
from loris.compliance.format import FormatCompliance
from loris.compliance.helpers import ComparableMixin
from loris.compliance.helpers import st
from loris.compliance.http import HttpCompliance
from loris.compliance.quality import QualityCompliance
from loris.compliance.region import RegionCompliance
from loris.compliance.rotation import RotationCompliance
from loris.compliance.size import SizeCompliance
from loris.constants import KEYWORD_MAX_AREA
from loris.constants import KEYWORD_MAX_HEIGHT
from loris.constants import KEYWORD_MAX_WIDTH
from loris.constants import QUALITY_COLOR
class Compliance(ComparableMixin):
ALL_LEVEL_1 = st(
HttpCompliance.LEVEL_1
+ QualityCompliance.LEVEL_1
+ RegionCompliance.LEVEL_1
+ RotationCompliance.LEVEL_1
+ SizeCompliance.LEVEL_1
)
ALL_LEVEL_2 = st(
FormatCompliance.LEVEL_2
+ HttpCompliance.LEVEL_2
+ QualityCompliance.LEVEL_2
+ RegionCompliance.LEVEL_2
+ RotationCompliance.LEVEL_2
+ SizeCompliance.LEVEL_2
)
def __init__(self, config):
self.format = FormatCompliance(config["formats"])
self.http = HttpCompliance(config["http"])
self.quality = QualityCompliance(config["quality"])
self.region = RegionCompliance(config["region"])
self.rotation = RotationCompliance(config["rotation"])
self.size = SizeCompliance(config["size"])
self._extra_features = None
self._int = None
self._uri = None
# make it possible to do int(self), and do comparisons
def __int__(self):
if self._int is None:
ints = map(
int, (self.format, self.http, self.quality, self.region, self.rotation, self.size)
)
self._int = min(ints)
return self._int
def __str__(self):
return f"level{int(self)}"
@property
def uri(self):
if self._uri is None:
self._uri = f"http://iiif.io/api/image/3/level{int(self)}.json"
return self._uri
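    # int(self) is the minimum level met across all categories, so the profile URI
    # always advertises the weakest-link compliance level.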
@property
def all_enabled_features(self):
# Note that formats and qualities aren't 'features' and are always
        # listed explicitly in the profile (other than jpg and default)
return st(
self.http.features + self.region.features + self.rotation.features + self.size.features
)
def extra_qualities(self, include_color=True):
qualities = self.quality.features
if not include_color:
qualities = tuple(filter(lambda q: q != QUALITY_COLOR, qualities))
return qualities
@property
def extra_formats(self):
return self.format.features
@property
def extra_features(self):
# Features supported above the calculated compliance level, i.e. the
# difference between all enabled features and the calculated compliance
# level. For listing in profile[1]['supports'].
if self._extra_features is None:
level_features = set(()) # 0
if int(self) == 2:
level_features = set(Compliance.ALL_LEVEL_2)
elif int(self) == 1:
level_features = set(Compliance.ALL_LEVEL_1)
self._extra_features = set(self.all_enabled_features) - level_features
return st(self._extra_features)
| 35.531915 | 99 | 0.670958 | [
"BSD-2-Clause"
] | jpstroop/loris-redux | loris/compliance/__init__.py | 3,340 | Python |
from contextlib import nullcontext as does_not_raise
from typing import Any
import pytest
from _mock_data.window_handles import WINDOW_HANDLE_1_ID, WINDOW_HANDLE_4_ID
from browserist.exception.window_handle import WindowHandleIdNotFoundError, WindowHandleIdNotValidError
from browserist.model.window.controller import WindowHandleController
@pytest.mark.parametrize("id", [
(WINDOW_HANDLE_1_ID),
])
def test_window_handle_controller_remove_handle_by_id(id: str, window_handle_controller: WindowHandleController) -> None:
assert window_handle_controller.count() == 3
window_handle_controller.remove_handle_by_id(id)
assert window_handle_controller.count() == 2
@pytest.mark.parametrize("id, expectation", [
(WINDOW_HANDLE_1_ID, does_not_raise()),
("Not valid ID", pytest.raises(WindowHandleIdNotValidError)),
])
def test_window_handle_controller_remove_handle_by_id_invalid_error(id: str, expectation: Any, window_handle_controller: WindowHandleController) -> None:
with expectation:
window_handle_controller.remove_handle_by_id(id) is not None
@pytest.mark.parametrize("id, expectation", [
(WINDOW_HANDLE_1_ID, does_not_raise()),
(WINDOW_HANDLE_4_ID, pytest.raises(WindowHandleIdNotFoundError)),
])
def test_window_handle_controller_remove_handle_by_id_not_found_error(id: str, expectation: Any, window_handle_controller: WindowHandleController) -> None:
with expectation:
window_handle_controller.remove_handle_by_id(id) is not None
| 41.611111 | 155 | 0.814419 | [
"Apache-2.0"
] | jakob-bagterp/browserist | test/browser/window/controller/remove_handle_by_id_test.py | 1,498 | Python |
from typing import IO, Dict, Optional, Set
from rdflib.plugins.serializers.xmlwriter import XMLWriter
from rdflib.namespace import Namespace, RDF, RDFS # , split_uri
from rdflib.plugins.parsers.RDFVOC import RDFVOC
from rdflib.graph import Graph
from rdflib.term import Identifier, URIRef, Literal, BNode
from rdflib.util import first, more_than
from rdflib.collection import Collection
from rdflib.serializer import Serializer
from xml.sax.saxutils import quoteattr, escape
import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
def __init__(self, store: Graph):
super(XMLSerializer, self).__init__(store)
def __bindings(self):
store = self.store
nm = store.namespace_manager
bindings = {}
for predicate in set(store.predicates()):
prefix, namespace, name = nm.compute_qname_strict(predicate)
bindings[prefix] = URIRef(namespace)
RDFNS = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
if "rdf" in bindings:
assert bindings["rdf"] == RDFNS
else:
bindings["rdf"] = RDFNS
for prefix, namespace in bindings.items():
yield prefix, namespace
def serialize(
self,
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
**args,
):
# if base is given here, use that, if not and a base is set for the graph use that
if base is not None:
self.base = base
elif self.store.base is not None:
self.base = self.store.base
self.__stream = stream
self.__serialized: Dict[Identifier, int] = {}
encoding = self.encoding
self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
# startDocument
write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
# startRDF
write("<rdf:RDF\n")
# If provided, write xml:base attribute for the RDF
if "xml_base" in args:
write(' xml:base="%s"\n' % args["xml_base"])
elif self.base:
write(' xml:base="%s"\n' % self.base)
# TODO:
# assert(
# namespaces["http://www.w3.org/1999/02/22-rdf-syntax-ns#"]=='rdf')
bindings = list(self.__bindings())
bindings.sort()
for prefix, namespace in bindings:
if prefix:
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
write(">\n")
# write out triples by subject
for subject in self.store.subjects():
self.subject(subject, 1)
# endRDF
write("</rdf:RDF>\n")
# Set to None so that the memory can get garbage collected.
# self.__serialized = None
del self.__serialized
def subject(self, subject, depth=1):
if subject not in self.__serialized:
self.__serialized[subject] = 1
if isinstance(subject, (BNode, URIRef)):
write = self.write
indent = " " * depth
element_name = "rdf:Description"
if isinstance(subject, BNode):
write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
else:
uri = quoteattr(self.relativize(subject))
write("%s<%s rdf:about=%s" % (indent, element_name, uri))
if (subject, None, None) in self.store:
write(">\n")
for predicate, object in self.store.predicate_objects(subject):
self.predicate(predicate, object, depth + 1)
write("%s</%s>\n" % (indent, element_name))
else:
write("/>\n")
def predicate(self, predicate, object, depth=1):
write = self.write
indent = " " * depth
qname = self.store.namespace_manager.qname_strict(predicate)
if isinstance(object, Literal):
attributes = ""
if object.language:
attributes += ' xml:lang="%s"' % object.language
if object.datatype:
attributes += ' rdf:datatype="%s"' % object.datatype
write(
"%s<%s%s>%s</%s>\n"
% (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
)
else:
if isinstance(object, BNode):
write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
else:
write(
"%s<%s rdf:resource=%s/>\n"
% (indent, qname, quoteattr(self.relativize(object)))
)
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
# TODO:
def fix(val):
"strip off _: from nodeIDs... as they are not valid NCNames"
if val.startswith("_:"):
return val[2:]
else:
return val
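# e.g. fix("_:b0") -> "b0", while fix("b0") is returned unchanged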
class PrettyXMLSerializer(Serializer):
def __init__(self, store: Graph, max_depth=3):
super(PrettyXMLSerializer, self).__init__(store)
self.forceRDFAbout: Set[URIRef] = set()
def serialize(
self,
stream: IO[bytes],
base: Optional[str] = None,
encoding: Optional[str] = None,
**args,
):
self.__serialized: Dict[Identifier, int] = {}
store = self.store
# if base is given here, use that, if not and a base is set for the graph use that
if base is not None:
self.base = base
elif store.base is not None:
self.base = store.base
self.max_depth = args.get("max_depth", 3)
assert self.max_depth > 0, "max_depth must be greater than 0"
self.nm = nm = store.namespace_manager
self.writer = writer = XMLWriter(stream, nm, encoding)
namespaces = {}
possible = set(store.predicates()).union(store.objects(None, RDF.type))
for predicate in possible:
prefix, namespace, local = nm.compute_qname_strict(predicate)
namespaces[prefix] = namespace
namespaces["rdf"] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
writer.push(RDFVOC.RDF)
if "xml_base" in args:
writer.attribute(XMLBASE, args["xml_base"])
elif self.base:
writer.attribute(XMLBASE, self.base)
writer.namespaces(namespaces.items())
subject: Identifier
# Write out subjects that can not be inline
for subject in store.subjects(): # type: ignore[assignment]
if (None, None, subject) in store:
if (subject, None, subject) in store:
self.subject(subject, 1)
else:
self.subject(subject, 1)
# write out anything that has not yet been reached
# write out BNodes last (to ensure they can be inlined where possible)
bnodes = set()
for subject in store.subjects(): # type: ignore[assignment]
if isinstance(subject, BNode):
bnodes.add(subject)
continue
self.subject(subject, 1)
# now serialize only those BNodes that have not been serialized yet
for bnode in bnodes:
if bnode not in self.__serialized:
                self.subject(bnode, 1)
writer.pop(RDFVOC.RDF)
stream.write("\n".encode("latin-1"))
# Set to None so that the memory can get garbage collected.
self.__serialized = None # type: ignore[assignment]
def subject(self, subject: Identifier, depth: int = 1):
store = self.store
writer = self.writer
if subject in self.forceRDFAbout:
writer.push(RDFVOC.Description)
writer.attribute(RDFVOC.about, self.relativize(subject))
writer.pop(RDFVOC.Description)
self.forceRDFAbout.remove(subject) # type: ignore[arg-type]
elif subject not in self.__serialized:
self.__serialized[subject] = 1
type = first(store.objects(subject, RDF.type))
try:
self.nm.qname(type)
            except Exception:
type = None
element = type or RDFVOC.Description
writer.push(element)
if isinstance(subject, BNode):
def subj_as_obj_more_than(ceil):
return True
# more_than(store.triples((None, None, subject)), ceil)
# here we only include BNode labels if they are referenced
# more than once (this reduces the use of redundant BNode
# identifiers)
if subj_as_obj_more_than(1):
writer.attribute(RDFVOC.nodeID, fix(subject))
else:
writer.attribute(RDFVOC.about, self.relativize(subject))
if (subject, None, None) in store:
for predicate, object in store.predicate_objects(subject):
if not (predicate == RDF.type and object == type):
self.predicate(predicate, object, depth + 1)
writer.pop(element)
elif subject in self.forceRDFAbout:
# TODO FIXME?: this looks like a duplicate of first condition
writer.push(RDFVOC.Description)
writer.attribute(RDFVOC.about, self.relativize(subject))
writer.pop(RDFVOC.Description)
self.forceRDFAbout.remove(subject) # type: ignore[arg-type]
def predicate(self, predicate, object, depth=1):
writer = self.writer
store = self.store
writer.push(predicate)
if isinstance(object, Literal):
if object.language:
writer.attribute(XMLLANG, object.language)
if object.datatype == RDF.XMLLiteral and isinstance(
object.value, xml.dom.minidom.Document
):
writer.attribute(RDFVOC.parseType, "Literal")
writer.text("")
writer.stream.write(object)
else:
if object.datatype:
writer.attribute(RDFVOC.datatype, object.datatype)
writer.text(object)
elif object in self.__serialized or not (object, None, None) in store:
if isinstance(object, BNode):
if more_than(store.triples((None, None, object)), 0):
writer.attribute(RDFVOC.nodeID, fix(object))
else:
writer.attribute(RDFVOC.resource, self.relativize(object))
else:
if first(store.objects(object, RDF.first)): # may not have type
# RDF.List
self.__serialized[object] = 1
# Warn that any assertions on object other than
# RDF.first and RDF.rest are ignored... including RDF.List
import warnings
warnings.warn(
"Assertions on %s other than RDF.first " % repr(object)
+ "and RDF.rest are ignored ... including RDF.List",
UserWarning,
stacklevel=2,
)
writer.attribute(RDFVOC.parseType, "Collection")
col = Collection(store, object)
for item in col:
if isinstance(item, URIRef):
self.forceRDFAbout.add(item)
self.subject(item)
if not isinstance(item, URIRef):
self.__serialized[item] = 1
else:
if first(
store.triples_choices(
(object, RDF.type, [OWL_NS.Class, RDFS.Class])
)
) and isinstance(object, URIRef):
writer.attribute(RDFVOC.resource, self.relativize(object))
elif depth <= self.max_depth:
self.subject(object, depth + 1)
elif isinstance(object, BNode):
if (
object not in self.__serialized
and (object, None, None) in store
and len(list(store.subjects(object=object))) == 1
):
# inline blank nodes if they haven't been serialized yet
# and are only referenced once (regardless of depth)
self.subject(object, depth + 1)
else:
writer.attribute(RDFVOC.nodeID, fix(object))
else:
writer.attribute(RDFVOC.resource, self.relativize(object))
writer.pop(predicate)
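# --- Illustrative usage sketch (added for clarity, not part of the original
# module). The serializers above are normally reached through
# Graph.serialize() with format="xml" or format="pretty-xml"; the tiny graph
# below is made up purely for demonstration.
if __name__ == "__main__":
    from rdflib import Graph, Literal, Namespace

    EX = Namespace("http://example.org/")
    g = Graph()
    g.add((EX.alice, EX.knows, EX.bob))
    g.add((EX.alice, EX.name, Literal("Alice", lang="en")))
    # PrettyXMLSerializer is registered under the "pretty-xml" plugin name.
    print(g.serialize(format="pretty-xml"))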
| 34.760753 | 90 | 0.551079 | [
"BSD-3-Clause"
] | GreenfishK/rdflib | rdflib/plugins/serializers/rdfxml.py | 12,931 | Python |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import os
from os.path import abspath, dirname
from restclients_core.dao import DAO
class Sdbmyuw_DAO(DAO):
def service_name(self):
return 'sdbmyuw'
def service_mock_paths(self):
return [abspath(os.path.join(dirname(__file__), "resources"))]
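# --- Illustrative usage sketch (added for clarity, not part of the original
# module). Assumes a configured restclients_core/Django environment; the DAO
# only declares its service name and where its mocked resources live.
if __name__ == "__main__":
    dao = Sdbmyuw_DAO()
    print(dao.service_name())        # -> 'sdbmyuw'
    print(dao.service_mock_paths())  # -> ['<package dir>/resources']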
| 23.8 | 70 | 0.731092 | [
"Apache-2.0"
] | uw-it-aca/uw-restclients-sdbmyuw | uw_sdbmyuw/dao.py | 357 | Python |
import pandas as pd
import read_mta_turnstile as t
# Generates a volunteer schedule, returned as a pivot table, for every station listed in the df_top.csv file.
def find_schedule():
# Read the stations with highest Toucan scores and select columns relavant
# to our schedule algorithm
top_stations = pd.read_csv('df_top.csv')
top_stations.rename(columns={'name':'STATION'}, inplace = True)
top_stations1 = top_stations.loc[:,['STATION','toucan_score']]
# Read the turnstile data and select the columns relavant to schedule algorithm
turnstile_data = t.read_mta_turnstile(start='20180501', end='20180531')
turnstile_data1 = turnstile_data.loc[:,['STATION','DATE','TIME','hourly_entries','hourly_exits']]
# Merge the two DataFrames to have hourly entries and exits of stations with top Toucan scores
turnstile_data2 = turnstile_data1.merge(top_stations1, on = 'STATION')
# Format dataframe and give it "day of week" and "hour of day" values and
# aggergate hourly entries of each station by date
schedule = pd.DataFrame(columns = ['STATION', 'hour_of_day', 'day_name', 'hourly_entries'])
agg = turnstile_data1.groupby(['STATION','DATE','TIME'])[['hourly_entries']].sum().reset_index()
agg.DATE = pd.to_datetime(agg.DATE, format='%m/%d/%Y')
agg.TIME = pd.to_datetime(agg.TIME, format='%H:%M:%S')
agg['day_name'] = agg.DATE.dt.day_name()
agg['hour_of_day'] = agg.TIME.dt.hour
    # Keep only the 8:00am, 12:00pm and 4:00pm audits (drop the 0:00, 4:00 and 8:00pm readings)
agg = agg[(agg['hour_of_day'] > 5) & (agg['hour_of_day'] < 19 )]
# Segment hours of day into three different shifts: Morning, Afternoon and Evening
l_times = []
for h in agg.hour_of_day:
if int(h) <= 11:
l_times.append('Morning')
elif int(h) >= 15:
l_times.append('Evening')
else:
l_times.append('Afternoon')
agg.hour_of_day = l_times
# For each station in the top station list, this for loop generates a schedule, which identifies
# three shifts with the highest number of entries during the week. Volunteers should be at the station
# at these three shifts.
for station_name in top_stations1.STATION.unique():
# Aggergate each station's hourly entries by day of the week, shifts of the day and
# pivot the DataFrame as shift vs. day
hm = agg.loc[agg.STATION == station_name,['hour_of_day','day_name','hourly_entries']]
hm = hm.groupby(['hour_of_day','day_name'])['hourly_entries'].mean().reset_index()
hm = hm.pivot(index='hour_of_day',columns='day_name',values='hourly_entries')
# Calculate three shifts with highest throughput
sc = hm.stack().nlargest(3).reset_index()
sc.rename(columns={0:'hourly_entries'}, inplace=True)
sc['STATION'] = [station_name]*3
schedule = schedule.append(sc) # This is a schedule for all stations in the top station list.
# Make a pivot table of the schedule
schedule['p'] = [1]*schedule.shape[0]
schedule_pivot = schedule.pivot_table(index=['STATION'],columns=['day_name','hour_of_day'],values='p')
return schedule_pivot | 51.903226 | 111 | 0.678061 | [
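# --- Illustrative usage sketch (added for clarity, not part of the original
# script). Requires df_top.csv and the May 2018 turnstile data read by
# read_mta_turnstile to be available locally.
if __name__ == "__main__":
    schedule_pivot = find_schedule()
    print(schedule_pivot.head())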
"MIT"
] | Stitchmaker/Metis_Bootcamp | 1-Benson_Project/find_schedule.py | 3,218 | Python |
import _continuation
import threading
__all__ = ['Fiber', 'error', 'current']
_tls = threading.local()
def current():
try:
return _tls.current_fiber
except AttributeError:
fiber = _tls.current_fiber = _tls.main_fiber = _create_main_fiber()
return fiber
class error(Exception):
pass
class Fiber(object):
_cont = None
_thread_id = None
_ended = False
def __init__(self, target=None, args=[], kwargs={}, parent=None):
def _run(c):
_tls.current_fiber = self
try:
return target(*args, **kwargs)
finally:
cont = self._cont
self._cont = None
self._ended = True
_continuation.permute(cont, self._get_active_parent()._cont)
self._func = _run
if parent is None:
parent = current()
self._thread_id = threading.current_thread().ident
if self._thread_id != parent._thread_id:
raise error('parent cannot be on a different thread')
self.parent = parent
def _get_active_parent(self):
parent = self.parent
while True:
if parent is not None and parent._cont is not None and not parent._ended:
break
parent = parent.parent
return parent
@classmethod
def current(cls):
return current()
@property
def parent(self):
return self.__dict__.get('parent', None)
@parent.setter
def parent(self, value):
if not isinstance(value, Fiber):
raise TypeError('parent must be a Fiber')
if value._ended:
raise ValueError('parent must not have ended')
if self._thread_id != value._thread_id:
raise ValueError('parent cannot be on a different thread')
self.__dict__['parent'] = value
def switch(self, value=None):
if self._ended:
raise error('Fiber has ended')
curr = current()
if curr._thread_id != self._thread_id:
raise error('Cannot switch to a fiber on a different thread')
if self._cont is None:
self._cont = _continuation.continulet(self._func)
try:
return curr._cont.switch(value=value, to=self._cont)
finally:
_tls.current_fiber = curr
def throw(self, *args):
if self._ended:
raise error('Fiber has ended')
curr = current()
if curr._thread_id != self._thread_id:
raise error('Cannot switch to a fiber on a different thread')
if self._cont is None:
# Fiber was not started yet, propagate to parent directly
self._ended = True
return self._get_active_parent().throw(*args)
try:
return curr._cont.throw(*args, to=self._cont)
finally:
_tls.current_fiber = curr
def is_alive(self):
return (self._cont is not None and self._cont.is_pending()) or \
(self._cont is None and not self._ended)
def __getstate__(self):
raise TypeError('cannot serialize Fiber object')
def _create_main_fiber():
main_fiber = Fiber.__new__(Fiber)
main_fiber._cont = _continuation.continulet.__new__(_continuation.continulet)
main_fiber._ended = False
main_fiber._thread_id = threading.current_thread().ident
main_fiber.__dict__['parent'] = None
return main_fiber
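# --- Illustrative usage sketch (added for clarity, not part of the original
# module). This pure-Python implementation depends on PyPy's _continuation
# module, so the example only runs under PyPy.
if __name__ == "__main__":
    def worker():
        # current() inside the target returns the worker fiber itself
        print("inside fiber, parent is main:", current().parent is main)
        return "done"

    main = current()           # implicit main fiber of this thread
    fib = Fiber(target=worker)
    fib.switch()               # run worker to completion, then return here
    print(fib.is_alive())      # -> False once the target has returned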
| 27.895161 | 85 | 0.603643 | [
"MIT"
] | timgates42/python-fibers | fibers/_pyfibers.py | 3,459 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
CASCADE_PLUGINS = getattr(settings, 'SHOP_CASCADE_PLUGINS',
('auth', 'breadcrumb', 'catalog', 'cart', 'checkout', 'extensions', 'order', 'processbar', 'search',))
def set_defaults(config):
config.setdefault('plugins_with_extra_fields', {})
config['plugins_with_extra_fields'].setdefault('ShopReorderButtonPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
'extra_units:Margins': 'px,em'
},
))
config['plugins_with_extra_fields'].setdefault('ShopCancelOrderButtonPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Margins': ['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],
'extra_units:Margins': 'px,em'
},
))
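# --- Illustrative usage sketch (added for clarity, not part of the original
# module). set_defaults() is normally fed the CMSPLUGIN_CASCADE settings dict;
# it fills in margin styling fields for the shop buttons. Assumes a configured
# Django environment with cmsplugin_cascade installed.
if __name__ == "__main__":
    config = {}
    set_defaults(config)
    print(sorted(config['plugins_with_extra_fields'].keys()))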
| 38.615385 | 106 | 0.685259 | [
"BSD-3-Clause"
] | Edison4mobile/django-shopping | shop/cascade/settings.py | 1,004 | Python |
import sklearn.neighbors
import scipy.interpolate
from numpy import linalg as LA
from numpy import arccos, cos, sin
from apexpy import Apex
import numpy as np
import satplottools  # used by map_polar2cart for latitude/longitude -> cartesian conversion
#Create an Apex conversion instance at the usual reference altitude
#no epoch is specified; we will set the epoch just-in-time when we are going to
#do an coordinate transformation
apex_reference_height = 110000. # Apex reference height in meters
module_Apex = Apex(refh=apex_reference_height/1000.)
def update_apex_epoch(dt):
year = dt.year
doy = dt.timetuple().tm_yday
epoch = year+doy/(366. if np.mod(year,4)==0 else 365.)
print('Setting Apex epoch for {} to {}'.format(dt.strftime('%Y%m%d'),epoch))
module_Apex.set_epoch(epoch)
def latlt2polar(lat,lt,hemisphere):
"""
Converts an array of latitude and lt points to polar for a top-down dialplot (latitude in degrees, LT in hours)
i.e. makes latitude the radial quantity and MLT the azimuthal
get the radial displacement (referenced to down from northern pole if we want to do a top down on the north,
or up from south pole if visa-versa)
"""
from numpy import pi
if hemisphere=='N':
r = 90.-lat
elif hemisphere=='S':
r = 90.-(-1*lat)
else:
raise ValueError('%s is not a valid hemisphere, N or S, please!' % (hemisphere))
#convert lt to theta (azimuthal angle) in radians
theta = lt/24. * 2*pi
#the pi/2 rotates the coordinate system from
#theta=0 at negative y-axis (local time) to
#theta=0 at positive x axis (traditional polar coordinates)
return r,theta
def polar2dial(ax):
"""
Turns a matplotlib axes polar plot into a dial plot
"""
#Rotate the plot so that noon is at the top and midnight
#is at the bottom, and fix the labels so radial direction
#is latitude and azimuthal direction is local time in hours
ax.set_theta_zero_location('S')
theta_label_values = np.array([0.,3.,6.,9.,12.,15.,18.,21.])*180./12
theta_labels = ['%d:00' % (int(th/180.*12)) for th in theta_label_values.flatten().tolist()]
ax.set_thetagrids(theta_label_values,labels=theta_labels)
r_label_values = 90.-np.array([80.,70.,60.,50.,40.])
r_labels = [r'$%d^{o}$' % (int(90.-rv)) for rv in r_label_values.flatten().tolist()]
ax.set_rgrids(r_label_values,labels=r_labels)
ax.set_rlim([0.,40.])
def map_polar2cart(LAT,LON, hemi = 'N'):
#convert latitude and longitude (in degrees) to cartesian coordinates for interpolation purposes
X_map, Y_map = satplottools.latlon2cart(LAT.flatten(), LON.flatten(),hemi)
return X_map, Y_map
def dmsp_map_interpolate(X_dmsp, Y_dmsp, X_map, Y_map, tolerance = 0.5):
"""
    generic function to spatially interpolate with the SSJ data using nearest neighbors, with an arbitrary distance tolerance
"""
#indices of the map that fit the dmsp map
indices = scipy.interpolate.griddata((X_map,Y_map), np.arange(len(X_map.flatten())), (X_dmsp,Y_dmsp), method = 'nearest')
#get mask for map elements that are within distance tolerance
mask = (abs(X_map[indices] - X_dmsp) < tolerance) & (abs(Y_map[indices] - Y_dmsp) < tolerance)
return indices,mask
def greatCircleDist(location1,location2,lonorlt='lt'):
#Returns n angular distances in radians between n-by-2 numpy arrays
#location1, location2 (calculated row-wise so diff between
#location1[0,] and location2[0,]
#assuming that these arrays have the columns lat[deg],localtime[hours]
#and that they are points on a sphere of constant radius
#(the points are at the same altitude)
pi = np.pi
azi2rad = pi/12. if lonorlt=='lt' else pi/180
wrappt = 24. if lonorlt=='lt' else 360.
#Bounds check
over = location1[:,1] > wrappt
under = location1[:,1] < 0.
location1[over,1]=location1[over,1]-wrappt
location1[under,1]=location1[under,1]+wrappt
if location1.ndim == 1 or location2.ndim == 1:
dphi = abs(location2[1]-location1[1])*azi2rad
a = (90-location1[0])/360*2*pi #get the colatitude in radians
b = (90-location2[0])/360*2*pi
C = np.pi - np.abs(dphi - np.pi)#get the angular distance in longitude in radians
else:
dphi = abs(location2[:,1]-location1[:,1])*azi2rad
a = (90-location1[:,0])/360*2*pi #get the colatitude in radians
b = (90-location2[:,0])/360*2*pi
C = np.pi - np.abs(dphi - np.pi)#get the angular distance in longitude in radians
return arccos(cos(a)*cos(b)+sin(a)*sin(b)*cos(C))
def myGreatCircleDistance(location1,location2):
#add a dimension
location1 = location1.reshape(1, 2)
location2 = location2.reshape(1, 2)
# location2.shape = (1,)+location2.shape[:,1]
angular_distance = greatCircleDist(location1,location2,lonorlt='lon')
return angular_distance
def dmsp_map_interpolate_NN_smooth(X_dmsp, Y_dmsp, X_map, Y_map, Obs_map, k = 5, tol = 3):
"""
    generic function to spatially interpolate with the SSJ data using nearest neighbors, with an arbitrary distance tolerance
"""
#reshape to N by 2 array where each row is (X, Y)
dmsp_points = np.hstack((X_dmsp.flatten().reshape(-1,1),Y_dmsp.flatten().reshape(-1,1)))
map_points = np.hstack((X_map.flatten().reshape(-1,1), Y_map.flatten().reshape(-1,1)))
N_points = dmsp_points.shape[0]
obs_val = Obs_map.flatten()
model = sklearn.neighbors.BallTree(map_points,leaf_size = 40 )
dists, inds = model.query(dmsp_points, k=k)
obs_interp = np.empty(N_points)
for i in range(N_points):
norm = LA.norm(dists[i])
if (norm > tol):
obs_interp[i] = np.nan
else:
# weights = dists[i]/norm
weights = dists[i]/np.nansum(dists[i])
obs_interp[i] = np.nansum( obs_val[inds[i]] * weights )
return obs_interp
def dmsp_map_interpolate_NN_smooth_great_circle(lat_dmsp, lon_dmsp, lat_map, lon_map, Obs_map, k = 5, tol = 1.5):
"""
generic function to spatially interpolate with the SSJ data using nearest neighbors using some arbirtary distance tolerance
"""
tol = np.deg2rad(tol)
#reshape to N by 2 array where each row is (lat, lon)
dmsp_points = np.deg2rad(np.hstack((lat_dmsp.flatten().reshape(-1,1),lon_dmsp.flatten().reshape(-1,1))))
map_points = np.deg2rad(np.hstack((lat_map.flatten().reshape(-1,1), lon_map.flatten().reshape(-1,1))))
N_points = dmsp_points.shape[0]
obs_val = Obs_map.flatten()
model = sklearn.neighbors.NearestNeighbors(n_neighbors = k, radius = tol, metric = 'haversine')
model.fit(map_points)
neighbors = model.kneighbors(dmsp_points, return_distance = True)
#indices
obs_interp = np.empty(N_points)
for i in range(N_points):
distances = neighbors[0][i]
inds = neighbors[1][i]
weights = distances/np.nansum(distances)
obs_interp[i] = np.nansum( obs_val[inds] * weights)
return obs_interp
from ssj_auroral_boundary import dmsp_spectrogram
def jd2dayhour(jds):
#assume jd is an array
temp = jds - 0.5
hours = (temp - np.floor(temp))*24
return hours
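# --- Illustrative usage sketch (added for clarity, not part of the original
# module). The numbers are made up and only exercise the pure-numpy helpers,
# so no DMSP/SSJ data is needed.
if __name__ == "__main__":
    # 70 deg latitude at 12 MLT -> r = 20 deg from the pole, theta = pi
    r, theta = latlt2polar(np.array([70.0]), np.array([12.0]), 'N')
    print(r, theta)
    # Julian date 2458849.75 corresponds to 06:00 UT
    print(jd2dayhour(np.array([2458849.75])))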
| 41.715736 | 127 | 0.676199 | [
"MIT"
] | jali7001/LBH_to_E_flux | LBH_to_eflux/helper_funcs.py | 8,218 | Python |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from cm_api.endpoints.types import *
__docformat__ = "epytext"
HOST_TEMPLATES_PATH = "/clusters/%s/hostTemplates"
HOST_TEMPLATE_PATH = "/clusters/%s/hostTemplates/%s"
APPLY_HOST_TEMPLATE_PATH = HOST_TEMPLATE_PATH + "/commands/applyHostTemplate"
def create_host_template(resource_root, name, cluster_name):
"""
Create a host template.
@param resource_root: The root Resource object.
@param name: Host template name
@param cluster_name: Cluster name
@return: An ApiHostTemplate object for the created host template.
@since: API v3
"""
apitemplate = ApiHostTemplate(resource_root, name, [])
return call(resource_root.post,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, data=[apitemplate], api_version=3)[0]
def get_host_template(resource_root, name, cluster_name):
"""
Lookup a host template by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: An ApiHostTemplate object.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATE_PATH % (cluster_name, name),
ApiHostTemplate, api_version=3)
def get_all_host_templates(resource_root, cluster_name="default"):
"""
Get all host templates in a cluster.
@param cluster_name: Cluster name.
@return: ApiList of ApiHostTemplate objects for all host templates in a cluster.
@since: API v3
"""
return call(resource_root.get,
HOST_TEMPLATES_PATH % (cluster_name,),
ApiHostTemplate, True, api_version=3)
def delete_host_template(resource_root, name, cluster_name):
"""
Delete a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@return: The deleted ApiHostTemplate object.
@since: API v3
"""
return call(resource_root.delete,
HOST_TEMPLATE_PATH % (cluster_name, name),
ApiHostTemplate, api_version=3)
def update_host_template(resource_root, name, cluster_name, api_host_template):
"""
Update a host template identified by name in the specified cluster.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param api_host_template: The updated host template.
@return: The updated ApiHostTemplate.
@since: API v3
"""
return call(resource_root.put,
HOST_TEMPLATE_PATH % (cluster_name, name),
ApiHostTemplate, data=api_host_template, api_version=3)
def apply_host_template(resource_root, name, cluster_name, host_ids, start_roles):
"""
Apply a host template identified by name on the specified hosts and
optionally start them.
@param resource_root: The root Resource object.
@param name: Host template name.
@param cluster_name: Cluster name.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
@since: API v3
"""
host_refs = []
for host_id in host_ids:
host_refs.append(ApiHostRef(resource_root, host_id))
params = {"startRoles" : start_roles}
return call(resource_root.post,
APPLY_HOST_TEMPLATE_PATH % (cluster_name, name),
ApiCommand, data=host_refs, params=params, api_version=3)
class ApiHostTemplate(BaseApiResource):
_ATTRIBUTES = {
'name' : None,
'roleConfigGroupRefs' : Attr(ApiRoleConfigGroupRef),
'clusterRef' : ROAttr(ApiClusterRef),
}
def __init__(self, resource_root, name=None, roleConfigGroupRefs=None):
BaseApiObject.init(self, resource_root, locals())
def __str__(self):
return "<ApiHostTemplate>: %s (cluster %s)" % (self.name, self.clusterRef.clusterName)
def _api_version(self):
return 3
def _path(self):
return HOST_TEMPLATE_PATH % (self.clusterRef.clusterName, self.name)
def _do_update(self, update):
self._update(self._put('', ApiHostTemplate, data=update))
return self
def rename(self, new_name):
"""
Rename a host template.
@param new_name: New host template name.
@return: An ApiHostTemplate object.
"""
update = copy.copy(self)
update.name = new_name
return self._do_update(update)
def set_role_config_groups(self, role_config_group_refs):
"""
Updates the role config groups in a host template.
@param role_config_group_refs: List of role config group refs.
@return: An ApiHostTemplate object.
"""
update = copy.copy(self)
update.roleConfigGroupRefs = role_config_group_refs
return self._do_update(update)
def apply_host_template(self, host_ids, start_roles):
"""
Apply a host template identified by name on the specified hosts and
optionally start them.
@param host_ids: List of host ids.
@param start_roles: Whether to start the created roles or not.
@return: An ApiCommand object.
"""
return apply_host_template(self._get_resource_root(), self.name, self.clusterRef.clusterName, host_ids, start_roles)
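# --- Illustrative usage sketch (added for clarity, not part of the original
# module). resource_root would normally come from cm_api.api_client.ApiResource;
# the Cloudera Manager host, cluster name, template name and host ids below are
# hypothetical.
if __name__ == "__main__":
    from cm_api.api_client import ApiResource

    resource_root = ApiResource("cm-host.example.com", username="admin", password="admin")
    template = create_host_template(resource_root, "worker-template", "cluster1")
    cmd = template.apply_host_template(["host-id-1", "host-id-2"], start_roles=True)
    print(cmd)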
| 35.345455 | 120 | 0.736968 | [
"Apache-2.0"
] | AnniDu/cm_api | python/src/cm_api/endpoints/host_templates.py | 5,832 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterface":
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network interface operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Updates a network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
async def _get_effective_route_table_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> Optional["_models.EffectiveRouteListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def begin_get_effective_route_table(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller["_models.EffectiveRouteListResult"]:
"""Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def _list_effective_network_security_groups_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
async def begin_list_effective_network_security_groups(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller["_models.EffectiveNetworkSecurityGroupListResult"]:
"""Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveNetworkSecurityGroupListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'} # type: ignore
def list_virtual_machine_scale_set_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'} # type: ignore
async def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterface":
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_network_interface.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_virtual_machine_scale_set_ip_configurations(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
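# --- Illustrative usage sketch (not part of the generated SDK) ---
# The begin_* methods above return an AsyncLROPoller and the list_* methods
# return an AsyncItemPaged. The two helpers below show one hypothetical way to
# drive them; the subscription id, resource group and resource names are
# placeholders, and the credential setup is an assumption for illustration.
async def _example_get_effective_route_table():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2018_07_01.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # Start the long-running operation, then wait for its final result
            poller = await client.network_interfaces.begin_get_effective_route_table(
                resource_group_name="example-rg",
                network_interface_name="example-nic",
            )
            routes = await poller.result()
            for route in routes.value:
                print(route.address_prefix)
async def _example_list_vmss_network_interfaces():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.v2018_07_01.aio import NetworkManagementClient
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # Paged results are consumed with async for; no explicit await is needed
            paged = client.network_interfaces.list_virtual_machine_scale_set_network_interfaces(
                resource_group_name="example-rg",
                virtual_machine_scale_set_name="example-vmss",
            )
            async for nic in paged:
                print(nic.name)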
| 52.284426 | 354 | 0.680107 | [
"MIT"
] | AriZavala2/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py | 63,787 | Python |
# -*- coding: utf-8 -*-
# @Time : 2021/5/8 7:14
# @Author : 咸鱼型233
# @File : v1.1_py3_adjust.py
# @Software: PyCharm
# @Function: Python 3 compatibility adjustments of v1.0
# Change log: 2021.5.8-20:34 - this revision is broken; the next version will use urllib3 instead
import base64
import json
import urllib
import requests
import urllib3
from config import APPCODE, path_image
# Get the base64 encoding of an image's binary data (required by the API request parameters)
def get_img_base64(img_file):
    with open(img_file, 'rb') as infile:
        s = infile.read()
    # Decode to str so the value can be embedded directly in a JSON request body
    return base64.b64encode(s).decode()
# ---
def predict(url, appcode, img_base64, kv_config, old_format):
    # Build the request parameters (Body)
    if not old_format:
        param = {'image': img_base64}
if kv_config is not None:
param['configure'] = json.dumps(kv_config)
# param = json.dumps(param)
body = json.dumps(param)
else:
param = {}
pic = {'dataType': 50, 'dataValue': img_base64}
param['image'] = pic
if kv_config is not None:
conf = {'dataType': 50, 'dataValue': json.dumps(kv_config)}
param['configure'] = conf
inputs = {"inputs": [param]}
body = json.dumps(inputs)
    # Build the request headers from the APPCODE of the Aliyun table OCR API
    headers = {'Authorization': 'APPCODE %s' % appcode}
    try:
        # The API expects a POST request carrying the JSON body built above
        response = requests.post(url=url, headers=headers, data=body, timeout=10)
        return response.status_code, response.headers, response.content
    except requests.RequestException as e:
        # Surface whatever response information is available on a failed request
        resp = getattr(e, 'response', None)
        if resp is not None:
            return resp.status_code, resp.headers, resp.content
        raise
def demo():
appcode = APPCODE
url = 'https://form.market.alicloudapi.com/api/predict/ocr_table_parse'
img_file = path_image
    # Set to True if the input payload uses the "inputs" wrapper format, otherwise False
is_old_format = False
config = {'format': 'html', 'finance': False, 'dir_assure': False}
    # If there is no configure field, set config to None
# config = None
img_base64data = get_img_base64(img_file)
stat, header, content = predict(url, appcode, img_base64data, config, is_old_format)
if stat != 200:
print('Http status code: ', stat)
print('Error msg in header: ', header['x-ca-error-message'] if 'x-ca-error-message' in header else '')
print('Error msg in body: ', content)
exit()
if is_old_format:
result_str = json.loads(content)['outputs'][0]['outputValue']['dataValue']
else:
result_str = content
print(result_str)
# result = json.loads(result_str)
if __name__ == '__main__':
demo()
| 28.930233 | 110 | 0.633039 | [
"MIT"
] | Ayusummer/DailyNotes | DailyLife/picOCR_toExcel/old_version/v1.1_py3_adjust.py | 2,654 | Python |
import logging
import zmq
from gabriel_protocol import gabriel_pb2
from gabriel_server import network_engine
TEN_SECONDS = 10000
REQUEST_RETRIES = 3
logger = logging.getLogger(__name__)
def run(engine, source_name, server_address, all_responses_required=False,
timeout=TEN_SECONDS, request_retries=REQUEST_RETRIES):
context = zmq.Context()
while request_retries > 0:
socket = context.socket(zmq.REQ)
socket.connect(server_address)
from_standalone_engine = gabriel_pb2.FromStandaloneEngine()
from_standalone_engine.welcome.source_name = source_name
from_standalone_engine.welcome.all_responses_required = (
all_responses_required)
socket.send(from_standalone_engine.SerializeToString())
logger.info('Sent welcome message to server')
while True:
if socket.poll(timeout) == 0:
logger.warning('No response from server')
socket.setsockopt(zmq.LINGER, 0)
socket.close()
request_retries -= 1
break
message_from_server = socket.recv()
if message_from_server == network_engine.HEARTBEAT:
socket.send(network_engine.HEARTBEAT)
continue
input_frame = gabriel_pb2.InputFrame()
input_frame.ParseFromString(message_from_server)
result_wrapper = engine.handle(input_frame)
from_standalone_engine = gabriel_pb2.FromStandaloneEngine()
from_standalone_engine.result_wrapper.CopyFrom(result_wrapper)
socket.send(from_standalone_engine.SerializeToString())
    logger.warning('Ran out of retries. Abandoning server connection.')
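# --- Illustrative usage sketch (not part of the library) ---
# run() only needs an object exposing handle(input_frame) -> ResultWrapper.
# The tiny engine below echoes a fixed text payload; the ResultWrapper/PayloadType
# field names follow the gabriel_protocol conventions, and the source name and
# server address are placeholders.
class _EchoEngine:
    def handle(self, input_frame):
        result_wrapper = gabriel_pb2.ResultWrapper()
        result_wrapper.status = gabriel_pb2.ResultWrapper.Status.SUCCESS
        result = result_wrapper.results.add()
        result.payload_type = gabriel_pb2.PayloadType.TEXT
        result.payload = b'echo'
        return result_wrapper
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    run(_EchoEngine(), source_name='echo', server_address='tcp://localhost:5555')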
| 34.66 | 74 | 0.683785 | [
"Apache-2.0"
] | cmusatyalab/gabriel | server/src/gabriel_server/network_engine/engine_runner.py | 1,733 | Python |
import cupy as np
def supersample(clip, d, n_frames):
"""Replaces each frame at time t by the mean of `n_frames` equally spaced frames
taken in the interval [t-d, t+d]. This results in motion blur.
"""
def filter(get_frame, t):
timings = np.linspace(t - d, t + d, n_frames)
frame_average = np.mean(
1.0 * np.array([get_frame(t_) for t_ in timings], dtype="uint16"), axis=0
)
return frame_average.astype("uint8")
return clip.transform(filter)
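# --- Illustrative usage sketch (assumes a local "input.mp4" exists) ---
# A hypothetical way to apply the effect above; the file names and the
# d/n_frames values are placeholders chosen for illustration only.
if __name__ == "__main__":
    from moviepy.video.io.VideoFileClip import VideoFileClip
    clip = VideoFileClip("input.mp4")
    # Average 5 frames sampled within +/- 0.04 s of each frame time (motion blur)
    blurred = supersample(clip, d=0.04, n_frames=5)
    blurred.write_videofile("output.mp4")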
| 30 | 85 | 0.627451 | [
"MIT"
] | va6996/moviepy | moviepy/video/fx/supersample.py | 510 | Python |
"""scrapli_cfg.platform.core.cisco_iosxe.sync_platform"""
from typing import Any, Callable, List, Optional
from scrapli.driver import NetworkDriver
from scrapli.response import MultiResponse, Response
from scrapli_cfg.diff import ScrapliCfgDiffResponse
from scrapli_cfg.exceptions import DiffConfigError, FailedToDetermineDeviceState
from scrapli_cfg.platform.base.sync_platform import ScrapliCfgPlatform
from scrapli_cfg.platform.core.cisco_iosxe.base_platform import (
CONFIG_SOURCES,
FilePromptMode,
ScrapliCfgIOSXEBase,
)
from scrapli_cfg.response import ScrapliCfgResponse
class ScrapliCfgIOSXE(ScrapliCfgPlatform, ScrapliCfgIOSXEBase):
def __init__(
self,
conn: NetworkDriver,
*,
config_sources: Optional[List[str]] = None,
on_prepare: Optional[Callable[..., Any]] = None,
filesystem: str = "flash:",
cleanup_post_commit: bool = True,
dedicated_connection: bool = False,
ignore_version: bool = False,
) -> None:
if config_sources is None:
config_sources = CONFIG_SOURCES
super().__init__(
conn=conn,
config_sources=config_sources,
on_prepare=on_prepare,
dedicated_connection=dedicated_connection,
ignore_version=ignore_version,
)
self.filesystem = filesystem
self._filesystem_space_available_buffer_perc = 10
self._replace = False
self.candidate_config_filename = ""
self.cleanup_post_commit = cleanup_post_commit
def _get_filesystem_space_available(self) -> int:
"""
        Determine the amount of space available on the target filesystem
Args:
N/A
Returns:
            int: space available on the filesystem, in bytes
Raises:
FailedToDetermineDeviceState: if unable to fetch file filesystem bytes available
"""
filesystem_size_result = self.conn.send_command(command=f"dir {self.filesystem} | i bytes")
if filesystem_size_result.failed:
raise FailedToDetermineDeviceState("failed to determine space available on filesystem")
return self._post_get_filesystem_space_available(output=filesystem_size_result.result)
def _determine_file_prompt_mode(self) -> FilePromptMode:
"""
Determine the device file prompt mode
Args:
N/A
Returns:
FilePromptMode: enum representing file prompt mode
Raises:
FailedToDetermineDeviceState: if unable to fetch file prompt mode
"""
file_prompt_mode_result = self.conn.send_command(command="show run | i file prompt")
if file_prompt_mode_result.failed:
raise FailedToDetermineDeviceState("failed to determine file prompt mode")
return self._post_determine_file_prompt_mode(output=file_prompt_mode_result.result)
def _delete_candidate_config(self) -> Response:
"""
Delete candidate config from the filesystem
Args:
N/A
Returns:
Response: response from deleting the candidate config
Raises:
N/A
"""
# have to check again because the candidate config may have changed this!
file_prompt_mode = self._determine_file_prompt_mode()
if file_prompt_mode in (FilePromptMode.ALERT, FilePromptMode.NOISY):
delete_events = [
(
f"delete {self.filesystem}{self.candidate_config_filename}",
"Delete filename",
),
(
"",
"[confirm]",
),
("", ""),
]
else:
delete_events = [
(f"delete {self.filesystem}{self.candidate_config_filename}", "[confirm]"),
("", ""),
]
delete_result = self.conn.send_interactive(interact_events=delete_events)
return delete_result
def get_version(self) -> ScrapliCfgResponse:
response = self._pre_get_version()
version_result = self.conn.send_command(command="show version | i Version")
return self._post_get_version(
response=response,
scrapli_responses=[version_result],
result=self._parse_version(device_output=version_result.result),
)
def get_config(self, source: str = "running") -> ScrapliCfgResponse:
response = self._pre_get_config(source=source)
config_result = self.conn.send_command(command=self._get_config_command(source=source))
return self._post_get_config(
response=response,
source=source,
scrapli_responses=[config_result],
result=config_result.result,
)
def load_config(self, config: str, replace: bool = False, **kwargs: Any) -> ScrapliCfgResponse:
"""
Load configuration to a device
Supported kwargs:
auto_clean: automatically "clean" any data that would be in a configuration from a
"get_config" operation that would prevent loading a config -- for example, things
like the "Building Configuration" lines in IOSXE output, etc.. Defaults to `True`
Args:
config: string of the configuration to load
replace: replace the configuration or not, if false configuration will be loaded as a
merge operation
kwargs: additional kwargs that the implementing classes may need for their platform,
see above for iosxe supported kwargs
Returns:
ScrapliCfgResponse: response object
Raises:
N/A
"""
if kwargs.get("auto_clean", True) is True:
config = self.clean_config(config=config)
response = self._pre_load_config(config=config)
config = self._prepare_load_config(config=config, replace=replace)
filesystem_bytes_available = self._get_filesystem_space_available()
self._space_available(filesystem_bytes_available=filesystem_bytes_available)
# when in tcl command mode or whatever it is, tcl wants \r for return char, so stash the
# original return char and sub in \r for a bit
original_return_char = self.conn.comms_return_char
tcl_comms_return_char = "\r"
# pop into tclsh before swapping the return char just to be safe -- \r or \n should both be
# fine for up to here but who knows... :)
self.conn.acquire_priv(desired_priv="tclsh")
self.conn.comms_return_char = tcl_comms_return_char
config_result = self.conn.send_config(config=config, privilege_level="tclsh")
# reset the return char to the "normal" one and drop into whatever is the "default" priv
self.conn.acquire_priv(desired_priv=self.conn.default_desired_privilege_level)
self.conn.comms_return_char = original_return_char
return self._post_load_config(
response=response,
scrapli_responses=[config_result],
)
def abort_config(self) -> ScrapliCfgResponse:
response = self._pre_abort_config(
session_or_config_file=bool(self.candidate_config_filename)
)
abort_result = self._delete_candidate_config()
self._reset_config_session()
return self._post_abort_config(response=response, scrapli_responses=[abort_result])
def save_config(self) -> Response:
"""
Save the config -- "copy run start"!
Args:
N/A
Returns:
Response: scrapli response object
Raises:
N/A
"""
# we always re-check file prompt mode because it could have changed!
file_prompt_mode = self._determine_file_prompt_mode()
if file_prompt_mode == FilePromptMode.ALERT:
save_events = [
(
"copy running-config startup-config",
"Destination filename",
),
("", ""),
]
elif file_prompt_mode == FilePromptMode.NOISY:
save_events = [
(
"copy running-config startup-config",
"Source filename",
),
(
"",
"Destination filename",
),
("", ""),
]
else:
save_events = [("copy running-config startup-config", "")]
save_result = self.conn.send_interactive(interact_events=save_events)
return save_result
def _commit_config_merge(self, file_prompt_mode: Optional[FilePromptMode] = None) -> Response:
"""
Commit the configuration in merge mode
Args:
file_prompt_mode: optionally provide the file prompt mode, if its None we will fetch it
to decide if we need to use interactive mode or not
Returns:
Response: scrapli response object
Raises:
N/A
"""
if file_prompt_mode is None:
file_prompt_mode = self._determine_file_prompt_mode()
if file_prompt_mode == FilePromptMode.ALERT:
merge_events = [
(
f"copy {self.filesystem}{self.candidate_config_filename} running-config",
"Destination filename",
),
("", ""),
]
elif file_prompt_mode == FilePromptMode.NOISY:
merge_events = [
(
f"copy {self.filesystem}{self.candidate_config_filename} running-config",
"Source filename",
),
(
"",
"Destination filename",
),
("", ""),
]
else:
merge_events = [
(f"copy {self.filesystem}{self.candidate_config_filename} running-config", "")
]
commit_result = self.conn.send_interactive(interact_events=merge_events)
return commit_result
def commit_config(self, source: str = "running") -> ScrapliCfgResponse:
scrapli_responses = []
response = self._pre_commit_config(
source=source, session_or_config_file=bool(self.candidate_config_filename)
)
file_prompt_mode = self._determine_file_prompt_mode()
if self._replace is True:
replace_command = (
f"configure replace {self.filesystem}{self.candidate_config_filename} force"
)
commit_result = self.conn.send_command(command=replace_command)
else:
commit_result = self._commit_config_merge(file_prompt_mode=file_prompt_mode)
scrapli_responses.append(commit_result)
save_config_result = self.save_config()
scrapli_responses.append(save_config_result)
if self.cleanup_post_commit:
cleanup_result = self._delete_candidate_config()
scrapli_responses.append(cleanup_result)
self._reset_config_session()
return self._post_load_config(
response=response,
scrapli_responses=scrapli_responses,
)
def diff_config(self, source: str = "running") -> ScrapliCfgDiffResponse:
scrapli_responses = []
device_diff = ""
source_config = ""
diff_response = self._pre_diff_config(
source=source, session_or_config_file=bool(self.candidate_config_filename)
)
try:
diff_result = self.conn.send_command(command=self._get_diff_command(source=source))
scrapli_responses.append(diff_result)
if diff_result.failed:
msg = "failed generating diff for config session"
self.logger.critical(msg)
raise DiffConfigError(msg)
device_diff = diff_result.result
source_config_result = self.get_config(source=source)
source_config = source_config_result.result
if isinstance(source_config_result.scrapli_responses, MultiResponse):
# in this case this will always be a multiresponse or nothing (failure) but mypy
# doesnt know that, hence the isinstance check
scrapli_responses.extend(source_config_result.scrapli_responses)
if source_config_result.failed:
msg = "failed fetching source config for diff comparison"
self.logger.critical(msg)
raise DiffConfigError(msg)
except DiffConfigError:
pass
source_config, candidate_config = self._normalize_source_candidate_configs(
source_config=source_config
)
return self._post_diff_config(
diff_response=diff_response,
scrapli_responses=scrapli_responses,
source_config=source_config,
candidate_config=candidate_config,
device_diff=device_diff,
)
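# --- Illustrative usage sketch (not part of the library) ---
# A minimal, hypothetical example of driving the platform above; the host and
# credentials are placeholders, and prepare() follows the usual scrapli_cfg
# workflow of preparing the cfg object before load/diff/commit operations.
if __name__ == "__main__":
    from scrapli.driver.core import IOSXEDriver
    device = {
        "host": "192.0.2.1",
        "auth_username": "user",
        "auth_password": "password",
        "auth_strict_key": False,
    }
    with IOSXEDriver(**device) as conn:
        cfg = ScrapliCfgIOSXE(conn=conn)
        cfg.prepare()
        cfg.load_config(config="hostname example-iosxe", replace=False)
        print(cfg.diff_config().device_diff)
        cfg.commit_config()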
| 34.626984 | 100 | 0.616548 | [
"MIT"
] | m1009d/scrapli_cfg | scrapli_cfg/platform/core/cisco_iosxe/sync_platform.py | 13,089 | Python |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDatacenterConnector
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vm-migration
# [START vmmigration_v1_generated_VmMigration_GetDatacenterConnector_sync]
from google.cloud import vmmigration_v1
def sample_get_datacenter_connector():
# Create a client
client = vmmigration_v1.VmMigrationClient()
# Initialize request argument(s)
request = vmmigration_v1.GetDatacenterConnectorRequest(
name="name_value",
)
# Make the request
response = client.get_datacenter_connector(request=request)
# Handle the response
print(response)
# [END vmmigration_v1_generated_VmMigration_GetDatacenterConnector_sync]
| 32.934783 | 85 | 0.768977 | [
"Apache-2.0"
] | googleapis/python-vm-migration | samples/generated_samples/vmmigration_v1_generated_vm_migration_get_datacenter_connector_sync.py | 1,515 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
extensions = [
'otcdocstheme'
]
html_theme = 'otcdocs'
html_theme_options = {
}
otcdocs_auto_name = False
otcdocs_auto_version = False
project = 'Dummy Service' # FIXME
otcdocs_repo_name = 'opentelekomcloud-docs/template' # FIXME
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2022-present, Open Telekom Cloud'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Dummy UMN" # FIXME
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'dummydoc' # FIXME
latex_documents = [
('index',
'umn-dummy.tex', # FIXME
u'%s User Manual Documentation' % project,
u'OpenTelekomCloud', 'manual'),
]
| 32.696203 | 79 | 0.714092 | [
"Apache-2.0"
] | kucerakk/template | umn/source/conf.py | 5,166 | Python |
import os
import json
import zipfile
import tempfile
from pathlib import Path
from copy import deepcopy
from .exception import FrictionlessException
from .metadata import Metadata
from .detector import Detector
from .resource import Resource
from .system import system
from . import helpers
from . import errors
from . import config
class Package(Metadata):
"""Package representation
API | Usage
-------- | --------
Public | `from frictionless import Package`
This class is one of the cornerstones of of Frictionless framework.
It manages underlaying resource and provides an ability to describe a package.
```python
package = Package(resources=[Resource(path="data/table.csv")])
    package.get_resource('table').read_rows() == [
{'id': 1, 'name': 'english'},
{'id': 2, 'name': '中国人'},
]
```
Parameters:
source (any): Source of the package; can be in various forms.
            Usually, it's a package descriptor in the form of a dict or a path
Also, it can be a glob pattern or a resource path
descriptor (dict|str): A resource descriptor provided explicitly.
Keyword arguments will patch this descriptor if provided.
resources? (dict|Resource[]): A list of resource descriptors.
It can be dicts or Resource instances.
id? (str): A property reserved for globally unique identifiers.
Examples of identifiers that are unique include UUIDs and DOIs.
name? (str): A short url-usable (and preferably human-readable) name.
This MUST be lower-case and contain only alphanumeric characters
along with “.”, “_” or “-” characters.
        title? (str): A Package title according to the specs
            It should be a human-oriented title of the resource.
        description? (str): A Package description according to the specs
            It should be a human-oriented description of the resource.
licenses? (dict[]): The license(s) under which the package is provided.
If omitted it's considered the same as the package's licenses.
sources? (dict[]): The raw sources for this data package.
It MUST be an array of Source objects.
Each Source object MUST have a title and
MAY have path and/or email properties.
profile? (str): A string identifying the profile of this descriptor.
For example, `fiscal-data-package`.
homepage? (str): A URL for the home on the web that is related to this package.
For example, github repository or ckan dataset address.
version? (str): A version string identifying the version of the package.
It should conform to the Semantic Versioning requirements and
should follow the Data Package Version pattern.
contributors? (dict[]): The people or organizations who contributed to this package.
It MUST be an array. Each entry is a Contributor and MUST be an object.
A Contributor MUST have a title property and MAY contain
path, email, role and organization properties.
keywords? (str[]): An Array of string keywords to assist users searching.
For example, ['data', 'fiscal']
image? (str): An image to use for this data package.
For example, when showing the package in a listing.
created? (str): The datetime on which this was created.
The datetime must conform to the string formats for RFC3339 datetime,
        basepath? (str): A basepath of the resource
            The fullpath of the resource is `basepath` joined with `path`
detector? (Detector): File/table detector.
For more information, please check the Detector documentation.
onerror? (ignore|warn|raise): Behaviour if there is an error.
It defaults to 'ignore'. The default mode will ignore all errors
on resource level and they should be handled by the user
being available in Header and Row objects.
trusted? (bool): Don't raise an exception on unsafe paths.
A path provided as a part of the descriptor considered unsafe
if there are path traversing or the path is absolute.
            A path provided as `source` or `path` is always trusted.
hashing? (str): a hashing algorithm for resources
It defaults to 'md5'.
Raises:
FrictionlessException: raise any error that occurs during the process
"""
def __init__(
self,
source=None,
*,
descriptor=None,
# Spec
resources=None,
id=None,
name=None,
title=None,
description=None,
licenses=None,
sources=None,
profile=None,
homepage=None,
version=None,
contributors=None,
keywords=None,
image=None,
created=None,
# Extra
basepath="",
detector=None,
onerror="ignore",
trusted=False,
hashing=None,
):
# Handle source
if source is not None:
if descriptor is None:
descriptor = source
file = system.create_file(source, basepath=basepath)
if file.multipart:
descriptor = {"resources": []}
for part in file.normpath:
descriptor["resources"].append({"path": part})
elif file.type == "table" and not file.compression:
descriptor = {"resources": [{"path": file.normpath}]}
# Handle pathlib
if isinstance(descriptor, Path):
descriptor = str(descriptor)
# Handle trusted
if descriptor is None:
trusted = True
# Handle zip
if helpers.is_zip_descriptor(descriptor):
descriptor = helpers.unzip_descriptor(descriptor, "datapackage.json")
# Set attributes
self.setinitial("resources", resources)
self.setinitial("name", name)
self.setinitial("id", id)
self.setinitial("licenses", licenses)
self.setinitial("profile", profile)
self.setinitial("title", title)
self.setinitial("description", description)
self.setinitial("homepage", homepage)
self.setinitial("version", version)
self.setinitial("sources", sources)
self.setinitial("contributors", contributors)
self.setinitial("keywords", keywords)
self.setinitial("image", image)
self.setinitial("created", created)
self.__basepath = basepath or helpers.parse_basepath(descriptor)
self.__detector = detector or Detector()
self.__onerror = onerror
self.__trusted = trusted
self.__hashing = hashing
super().__init__(descriptor)
def __setattr__(self, name, value):
if name == "hashing":
self.__hashing = value
elif name == "basepath":
self.__basepath = value
elif name == "onerror":
self.__onerror = value
elif name == "trusted":
self.__trusted = value
else:
return super().__setattr__(name, value)
self.metadata_process()
@Metadata.property
def name(self):
"""
Returns:
str?: package name
"""
return self.get("name")
@Metadata.property
def id(self):
"""
Returns:
str?: package id
"""
return self.get("id")
@Metadata.property
def licenses(self):
"""
Returns:
dict?: package licenses
"""
return self.get("licenses")
@Metadata.property
def profile(self):
"""
Returns:
str: package profile
"""
return self.get("profile", config.DEFAULT_PACKAGE_PROFILE)
@Metadata.property
def title(self):
"""
Returns:
str?: package title
"""
return self.get("title")
@Metadata.property
def description(self):
"""
Returns:
str?: package description
"""
return self.get("description")
@Metadata.property
def homepage(self):
"""
Returns:
str?: package homepage
"""
return self.get("homepage")
@Metadata.property
def version(self):
"""
Returns:
str?: package version
"""
return self.get("version")
@Metadata.property
def sources(self):
"""
Returns:
dict[]?: package sources
"""
return self.get("sources")
@Metadata.property
def contributors(self):
"""
Returns:
dict[]?: package contributors
"""
return self.get("contributors")
@Metadata.property
def keywords(self):
"""
Returns:
str[]?: package keywords
"""
return self.get("keywords")
@Metadata.property
def image(self):
"""
Returns:
str?: package image
"""
return self.get("image")
@Metadata.property
def created(self):
"""
Returns:
str?: package created
"""
return self.get("created")
@Metadata.property(cache=False, write=False)
def hashing(self):
"""
Returns:
str: package hashing
"""
return self.__hashing
@Metadata.property(cache=False, write=False)
def basepath(self):
"""
Returns:
str: package basepath
"""
return self.__basepath
@Metadata.property(cache=False, write=False)
def onerror(self):
"""
Returns:
            ignore|warn|raise: on error behaviour
"""
return self.__onerror
@Metadata.property(cache=False, write=False)
def trusted(self):
"""
Returns:
str: package trusted
"""
return self.__trusted
# Resources
@Metadata.property
def resources(self):
"""
Returns:
Resources[]: package resource
"""
resources = self.get("resources", [])
return self.metadata_attach("resources", resources)
@Metadata.property(cache=False, write=False)
def resource_names(self):
"""
Returns:
str[]: package resource names
"""
return [resource.name for resource in self.resources]
def add_resource(self, descriptor):
"""Add new resource to package.
Parameters:
descriptor (dict): resource descriptor
Returns:
Resource/None: added `Resource` instance or `None` if not added
"""
self.setdefault("resources", [])
self["resources"].append(descriptor)
return self.resources[-1]
def get_resource(self, name):
"""Get resource by name.
Parameters:
name (str): resource name
Raises:
FrictionlessException: if resource is not found
Returns:
Resource/None: `Resource` instance or `None` if not found
"""
for resource in self.resources:
if resource.name == name:
return resource
error = errors.PackageError(note=f'resource "{name}" does not exist')
raise FrictionlessException(error)
def has_resource(self, name):
"""Check if a resource is present
Parameters:
name (str): schema resource name
Returns:
bool: whether there is the resource
"""
for resource in self.resources:
if resource.name == name:
return True
return False
def remove_resource(self, name):
"""Remove resource by name.
Parameters:
name (str): resource name
Raises:
FrictionlessException: if resource is not found
Returns:
Resource/None: removed `Resource` instances or `None` if not found
"""
resource = self.get_resource(name)
self.resources.remove(resource)
return resource
# Expand
def expand(self):
"""Expand metadata
It will add default values to the package.
"""
self.setdefault("resources", self.resources)
self.setdefault("profile", self.profile)
for resource in self.resources:
resource.expand()
# Infer
def infer(self, *, stats=False):
"""Infer package's attributes
Parameters:
stats? (bool): stream files completely and infer stats
"""
# General
self.setdefault("profile", config.DEFAULT_PACKAGE_PROFILE)
for resource in self.resources:
resource.infer(stats=stats)
# Deduplicate names
if len(self.resource_names) != len(set(self.resource_names)):
seen_names = []
for index, name in enumerate(self.resource_names):
count = seen_names.count(name) + 1
if count > 1:
self.resources[index].name = "%s%s" % (name, count)
seen_names.append(name)
# Import/Export
def to_copy(self):
"""Create a copy of the package"""
descriptor = self.to_dict()
        # Resource's data may not be serializable (generators/functions)
descriptor.pop("resources", None)
resources = []
for resource in self.resources:
resources.append(resource.to_copy())
return Package(
descriptor,
resources=resources,
basepath=self.__basepath,
onerror=self.__onerror,
trusted=self.__trusted,
)
@staticmethod
def from_bigquery(source, *, dialect=None):
"""Import package from Bigquery
Parameters:
source (string): BigQuery `Service` object
dialect (dict): BigQuery dialect
Returns:
Package: package
"""
storage = system.create_storage("bigquery", source, dialect=dialect)
return storage.read_package()
def to_bigquery(self, target, *, dialect=None):
"""Export package to Bigquery
Parameters:
target (string): BigQuery `Service` object
dialect (dict): BigQuery dialect
Returns:
BigqueryStorage: storage
"""
storage = system.create_storage("bigquery", target, dialect=dialect)
storage.write_package(self.to_copy(), force=True)
return storage
@staticmethod
def from_ckan(source, *, dialect=None):
"""Import package from CKAN
Parameters:
source (string): CKAN instance url e.g. "https://demo.ckan.org"
dialect (dict): CKAN dialect
Returns:
Package: package
"""
storage = system.create_storage("ckan", source, dialect=dialect)
return storage.read_package()
def to_ckan(self, target, *, dialect=None):
"""Export package to CKAN
Parameters:
target (string): CKAN instance url e.g. "https://demo.ckan.org"
dialect (dict): CKAN dialect
Returns:
CkanStorage: storage
"""
storage = system.create_storage("ckan", target, dialect=dialect)
storage.write_package(self.to_copy(), force=True)
return storage
@staticmethod
def from_sql(source, *, dialect=None):
"""Import package from SQL
Parameters:
source (any): SQL connection string of engine
dialect (dict): SQL dialect
Returns:
Package: package
"""
storage = system.create_storage("sql", source, dialect=dialect)
return storage.read_package()
def to_sql(self, target, *, dialect=None):
"""Export package to SQL
Parameters:
target (any): SQL connection string of engine
dialect (dict): SQL dialect
Returns:
SqlStorage: storage
"""
storage = system.create_storage("sql", target, dialect=dialect)
storage.write_package(self.to_copy(), force=True)
return storage
@staticmethod
def from_zip(path, **options):
"""Create a package from ZIP
Parameters:
path(str): file path
            **options(dict): resource options
"""
return Package(descriptor=path, **options)
def to_zip(self, path, *, encoder_class=None):
"""Save package to a zip
Parameters:
path (str): target path
encoder_class (object): json encoder class
Raises:
FrictionlessException: on any error
"""
try:
with zipfile.ZipFile(path, "w") as archive:
package_descriptor = self.to_dict()
for index, resource in enumerate(self.resources):
descriptor = package_descriptor["resources"][index]
# Remote data
if resource.remote:
pass
# Memory data
elif resource.memory:
if not isinstance(resource.data, list):
path = f"{resource.name}.csv"
descriptor["path"] = path
del descriptor["data"]
with tempfile.NamedTemporaryFile() as file:
tgt = Resource(path=file.name, format="csv", trusted=True)
resource.write(tgt)
archive.write(file.name, path)
# Multipart data
elif resource.multipart:
for path, fullpath in zip(resource.path, resource.fullpath):
if os.path.isfile(fullpath):
if not helpers.is_safe_path(fullpath):
                                    note = f'Zipping unsafe "{fullpath}" is not supported'
error = errors.PackageError(note=note)
raise FrictionlessException(error)
archive.write(fullpath, path)
# Local Data
else:
path = resource.path
fullpath = resource.fullpath
if os.path.isfile(fullpath):
if not helpers.is_safe_path(fullpath):
                                note = f'Zipping unsafe "{fullpath}" is not supported'
error = errors.PackageError(note=note)
raise FrictionlessException(error)
archive.write(fullpath, path)
# Metadata
archive.writestr(
"datapackage.json",
json.dumps(
package_descriptor,
indent=2,
ensure_ascii=False,
cls=encoder_class,
),
)
except Exception as exception:
error = errors.PackageError(note=str(exception))
raise FrictionlessException(error) from exception
# Metadata
metadata_duplicate = True
metadata_Error = errors.PackageError # type: ignore
metadata_profile = deepcopy(config.PACKAGE_PROFILE)
metadata_profile["properties"]["resources"] = {"type": "array"}
def metadata_process(self):
# Resources
resources = self.get("resources")
if isinstance(resources, list):
for index, resource in enumerate(resources):
if not isinstance(resource, Resource):
if not isinstance(resource, dict):
resource = {"name": f"resource{index+1}"}
resource = Resource(
resource,
basepath=self.__basepath,
detector=self.__detector,
hashing=self.__hashing,
)
list.__setitem__(resources, index, resource)
resource.onerror = self.__onerror
resource.trusted = self.__trusted
resource.package = self
if not isinstance(resources, helpers.ControlledList):
resources = helpers.ControlledList(resources)
resources.__onchange__(self.metadata_process)
dict.__setitem__(self, "resources", resources)
def metadata_validate(self):
yield from super().metadata_validate()
# Extensions
if self.profile == "fiscal-data-package":
yield from super().metadata_validate(config.FISCAL_PACKAGE_PROFILE)
# Resources
for resource in self.resources:
yield from resource.metadata_errors
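# --- Hedged usage sketch (editorial addition, not part of the upstream module). ---
# It strings together the methods defined above: build a package from an in-memory
# descriptor (as `to_copy()` suggests is possible), infer metadata, archive it as a
# zip datapackage, and load it back. The file names are placeholder assumptions.
if __name__ == "__main__":
    pkg = Package({"resources": [{"path": "tables/cities.csv"}]})
    pkg.infer(stats=True)                 # fills in profile, schemas and stats
    pkg.to_zip("cities.datapackage.zip")  # writes datapackage.json plus local data
    restored = Package.from_zip("cities.datapackage.zip")
    print(restored.to_dict())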
| 31.3789 | 92 | 0.561748 | ["MIT"] | augusto-herrmann/frictionless-py | frictionless/package.py | 21,136 | Python |
"""
utils.py
"""
import pathlib
import tempfile
import shutil
import curio.io as io
async def atomic_write(p, data):
    """Write ``data`` to path ``p`` atomically.
    The bytes go to a temporary file in the same directory first; that file is
    then moved over the target path, so readers never observe a partial write.
    """
    p = pathlib.Path(p)
    with tempfile.NamedTemporaryFile(dir=p.parent, delete=False) as f:
        # Wrap the temporary file so the write can be awaited under curio.
        af = io.FileStream(f)
        res = await af.write(data)
    shutil.move(f.name, p)
    return res
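# --- Hedged usage sketch (editorial addition, not part of the original module). ---
# atomic_write() is a coroutine, so it has to run inside a curio kernel; the target
# path and payload below are made-up example values.
if __name__ == "__main__":
    import curio
    async def _demo():
        await atomic_write("settings.json", b'{"ok": true}')
    curio.run(_demo)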
| 17.888889 | 70 | 0.664596 | ["MIT"] | Zaharid/zsvc | zsvc/utils.py | 322 | Python |
from aiohttp import web
import asyncio
import uvloop
async def handle(request):
    # Greet the caller; the optional {name} path segment defaults to "Anonymous".
    name = request.match_info.get('name', "Anonymous")
    text = "Hello, " + name
    return web.Response(text=text)
app = web.Application()
app.add_routes([web.get('/', handle),
                web.get('/{name}', handle)])
# Install uvloop as the asyncio event-loop policy before web.run_app() creates
# the loop, so the server runs on the faster uvloop implementation.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
web.run_app(app)
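# --- Hedged usage sketch (editorial addition): a minimal client for the server
# above. It is only defined here for illustration; since web.run_app() blocks, it
# would normally live in a separate script. web.run_app() listens on port 8080 by
# default unless a port= argument is passed.
async def fetch_greeting(name="world"):
    import aiohttp  # ClientSession ships in the same aiohttp package
    async with aiohttp.ClientSession() as session:
        async with session.get(f"http://localhost:8080/{name}") as resp:
            return await resp.text()  # e.g. "Hello, world"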
| 23.8125 | 55 | 0.685039 | ["Apache-2.0"] | decaun/easy-python-study | async/http/aioserver.py | 381 | Python |
#
# PySNMP MIB module INT-SERV-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INT-SERV-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:18:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Counter32, IpAddress, ModuleIdentity, Unsigned32, MibIdentifier, NotificationType, Integer32, TimeTicks, Bits, mib_2, iso, Gauge32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Counter32", "IpAddress", "ModuleIdentity", "Unsigned32", "MibIdentifier", "NotificationType", "Integer32", "TimeTicks", "Bits", "mib-2", "iso", "Gauge32", "ObjectIdentity")
DisplayString, TruthValue, RowStatus, TestAndIncr, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TruthValue", "RowStatus", "TestAndIncr", "TextualConvention")
intSrv = ModuleIdentity((1, 3, 6, 1, 2, 1, 52))
if mibBuilder.loadTexts: intSrv.setLastUpdated('9710030642Z')
if mibBuilder.loadTexts: intSrv.setOrganization('IETF Integrated Services Working Group')
if mibBuilder.loadTexts: intSrv.setContactInfo(' Fred Baker Postal: Cisco Systems 519 Lado Drive Santa Barbara, California 93111 Tel: +1 805 681 0115 E-Mail: [email protected] John Krawczyk Postal: ArrowPoint Communications 235 Littleton Road Westford, Massachusetts 01886 Tel: +1 508 692 5875 E-Mail: [email protected]')
if mibBuilder.loadTexts: intSrv.setDescription('The MIB module to describe the Integrated Services Protocol')
intSrvObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 1))
intSrvGenObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 2))
intSrvNotifications = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 3))
intSrvConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4))
class SessionNumber(TextualConvention, Integer32):
description = 'The Session Number convention is used for numbers identifying sessions or saved PATH or RESV information. It is a number in the range returned by a TestAndIncr variable, having no protocol meaning whatsoever but serving instead as simple identifier. The alternative was a very complex instance or instance object that became unwieldy.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class Protocol(TextualConvention, Integer32):
description = 'The value of the IP Protocol field of an IP Datagram Header. This identifies the protocol layer above IP. For example, the value 6 is used for TCP and the value 17 is used for UDP. The values of this field are defined in the As- signed Numbers RFC.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class SessionType(TextualConvention, Integer32):
description = "The value of the C-Type field of a Session ob- ject, as defined in the RSVP specification. This value determines the lengths of octet strings and use of certain objects such as the 'port' variables. If the C-Type calls for an IP6 address, one would expect all source, des- tination, and next/previous hop addresses to be 16 bytes long, and for the ports to be UDP/TCP port numbers, for example."
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 255)
class Port(TextualConvention, OctetString):
description = 'The value of the UDP or TCP Source or Destina- tion Port field, a virtual destination port or generalized port identifier used with the IPSEC Authentication Header or Encapsulating Security Payload, or other session discriminator. If it is not used, the value should be of length 0. This pair, when coupled with the IP Addresses of the source and destination system and the IP protocol field, uniquely identifies a data stream.'
status = 'current'
displayHint = 'd'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(2, 4)
class MessageSize(TextualConvention, Integer32):
description = 'The size of a message in bytes. This is used to specify the minimum and maximum size of a message along an integrated services route.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BitRate(TextualConvention, Integer32):
description = 'The rate, in bits/second, that data may move in the context. Applicable contexts minimally include the speed of an interface or virtual circuit, the data rate of a (potentially aggre- gated) data flow, or the data rate to be allo- cated for use by a flow.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class BurstSize(TextualConvention, Integer32):
description = 'The number of octets of IP Data, including IP Headers, that a stream may send without concern for policing.'
status = 'current'
displayHint = 'd'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class QosService(TextualConvention, Integer32):
description = 'The class of service in use by a flow.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 5))
namedValues = NamedValues(("bestEffort", 1), ("guaranteedDelay", 2), ("controlledLoad", 5))
intSrvIfAttribTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 1), )
if mibBuilder.loadTexts: intSrvIfAttribTable.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribTable.setDescription("The reservable attributes of the system's in- terfaces.")
intSrvIfAttribEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: intSrvIfAttribEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribEntry.setDescription('The reservable attributes of a given inter- face.')
intSrvIfAttribAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 1), BitRate()).setUnits('Bits per second').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBits.setDescription('The number of bits/second currently allocated to reserved sessions on the interface.')
intSrvIfAttribMaxAllocatedBits = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 2), BitRate()).setUnits('Bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribMaxAllocatedBits.setDescription('The maximum number of bits/second that may be allocated to reserved sessions on the inter- face.')
intSrvIfAttribAllocatedBuffer = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 3), BurstSize()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribAllocatedBuffer.setDescription('The amount of buffer space required to hold the simultaneous burst of all reserved flows on the interface.')
intSrvIfAttribFlows = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvIfAttribFlows.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribFlows.setDescription('The number of reserved flows currently active on this interface. A flow can be created ei- ther from a reservation protocol (such as RSVP or ST-II) or via configuration information.')
intSrvIfAttribPropagationDelay = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 5), Integer32()).setUnits('microseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribPropagationDelay.setDescription('The amount of propagation delay that this in- terface introduces in addition to that intro- diced by bit propagation delays.')
intSrvIfAttribStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 1, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvIfAttribStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribStatus.setDescription("'active' on interfaces that are configured for RSVP.")
intSrvFlowTable = MibTable((1, 3, 6, 1, 2, 1, 52, 1, 2), )
if mibBuilder.loadTexts: intSrvFlowTable.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowTable.setDescription("Information describing the reserved flows us- ing the system's interfaces.")
intSrvFlowEntry = MibTableRow((1, 3, 6, 1, 2, 1, 52, 1, 2, 1), ).setIndexNames((0, "INT-SERV-MIB", "intSrvFlowNumber"))
if mibBuilder.loadTexts: intSrvFlowEntry.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowEntry.setDescription('Information describing the use of a given in- terface by a given flow. The counter intSrvFlowPoliced starts counting at the in- stallation of the flow.')
intSrvFlowNumber = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 1), SessionNumber())
if mibBuilder.loadTexts: intSrvFlowNumber.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNumber.setDescription('The number of this flow. This is for SNMP In- dexing purposes only and has no relation to any protocol value.')
intSrvFlowType = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 2), SessionType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowType.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowType.setDescription('The type of session (IP4, IP6, IP6 with flow information, etc).')
intSrvFlowOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("rsvp", 2), ("management", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOwner.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOwner.setDescription('The process that installed this flow in the queue policy database.')
intSrvFlowDestAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddr.setDescription("The destination address used by all senders in this session. This object may not be changed when the value of the RowStatus object is 'ac- tive'.")
intSrvFlowSenderAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddr.setDescription("The source address of the sender selected by this reservation. The value of all zeroes in- dicates 'all senders'. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestAddrLength.setDescription("The length of the destination address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowSenderAddrLength = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowSenderAddrLength.setDescription("The length of the sender's address in bits. This is the CIDR Prefix Length, which for IP4 hosts and multicast addresses is 32 bits. This object may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 8), Protocol()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowProtocol.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowProtocol.setDescription("The IP Protocol used by a session. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowDestPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 9), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDestPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDestPort.setDescription("The UDP or TCP port number used as a destina- tion port for all senders in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol, is 50 (ESP) or 51 (AH), this represents a virtual destination port number. A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowPort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 10), Port()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowPort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPort.setDescription("The UDP or TCP port number used as a source port for this sender in this session. If the IP protocol in use, specified by intSrvResvFwdProtocol is 50 (ESP) or 51 (AH), this represents a generalized port identifier (GPI). A value of zero indicates that the IP protocol in use does not have ports. This ob- ject may not be changed when the value of the RowStatus object is 'active'.")
intSrvFlowFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowFlowId.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowFlowId.setDescription('The flow ID that this sender is using, if this is an IPv6 session.')
intSrvFlowInterface = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 12), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowInterface.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowInterface.setDescription('The ifIndex value of the interface on which this reservation exists.')
intSrvFlowIfAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 13), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 16))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowIfAddr.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowIfAddr.setDescription('The IP Address on the ifEntry on which this reservation exists. This is present primarily to support those interfaces which layer multi- ple IP Addresses on the interface.')
intSrvFlowRate = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 14), BitRate()).setUnits('bits per second').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowRate.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowRate.setDescription("The Reserved Rate of the sender's data stream. If this is a Controlled Load service flow, this rate is derived from the Tspec rate parameter (r). If this is a Guaranteed service flow, this rate is derived from the Rspec clearing rate parameter (R).")
intSrvFlowBurst = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 15), BurstSize()).setUnits('bytes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowBurst.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBurst.setDescription("The size of the largest burst expected from the sender at a time. If this is less than the sender's advertised burst size, the receiver is asking the network to provide flow pacing beyond what would be provided under normal circumstances. Such pac- ing is at the network's option.")
intSrvFlowWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 16), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowWeight.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowWeight.setDescription('The weight used to prioritize the traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of weighting procedures.')
intSrvFlowQueue = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 17), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowQueue.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowQueue.setDescription('The number of the queue used by this traffic. Note that the interpretation of this object is implementation-specific, as implementations vary in their use of queue identifiers.')
intSrvFlowMinTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 18), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMinTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMinTU.setDescription('The minimum message size for this flow. The policing algorithm will treat smaller messages as though they are this size.')
intSrvFlowMaxTU = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 19), MessageSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowMaxTU.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowMaxTU.setDescription('The maximum datagram size for this flow that will conform to the traffic specification. This value cannot exceed the MTU of the interface.')
intSrvFlowBestEffort = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowBestEffort.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowBestEffort.setDescription('The number of packets that were remanded to best effort service.')
intSrvFlowPoliced = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowPoliced.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowPoliced.setDescription("The number of packets policed since the incep- tion of the flow's service.")
intSrvFlowDiscard = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 22), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowDiscard.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowDiscard.setDescription("If 'true', the flow is to incur loss when traffic is policed. If 'false', policed traff- ic is treated as best effort traffic.")
intSrvFlowService = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 23), QosService()).setMaxAccess("readonly")
if mibBuilder.loadTexts: intSrvFlowService.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowService.setDescription('The QoS service being applied to this flow.')
intSrvFlowOrder = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowOrder.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowOrder.setDescription('In the event of ambiguity, the order in which the classifier should make its comparisons. The row with intSrvFlowOrder=0 is tried first, and comparisons proceed in the order of in- creasing value. Non-serial implementations of the classifier should emulate this behavior.')
intSrvFlowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 52, 1, 2, 1, 25), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: intSrvFlowStatus.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowStatus.setDescription("'active' for all active flows. This object may be used to install static classifier infor- mation, delete classifier information, or au- thorize such.")
intSrvFlowNewIndex = MibScalar((1, 3, 6, 1, 2, 1, 52, 2, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: intSrvFlowNewIndex.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowNewIndex.setDescription("This object is used to assign values to intSrvFlowNumber as described in 'Textual Con- ventions for SNMPv2'. The network manager reads the object, and then writes the value back in the SET that creates a new instance of intSrvFlowEntry. If the SET fails with the code 'inconsistentValue', then the process must be repeated; If the SET succeeds, then the ob- ject is incremented, and the new instance is created according to the manager's directions.")
intSrvGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 1))
intSrvCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 52, 4, 2))
intSrvCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 52, 4, 2, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribGroup"), ("INT-SERV-MIB", "intSrvFlowsGroup"), ("INT-SERV-MIB", "intSrvGenObjectsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvCompliance = intSrvCompliance.setStatus('current')
if mibBuilder.loadTexts: intSrvCompliance.setDescription('The compliance statement ')
intSrvIfAttribGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 1)).setObjects(("INT-SERV-MIB", "intSrvIfAttribAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribMaxAllocatedBits"), ("INT-SERV-MIB", "intSrvIfAttribAllocatedBuffer"), ("INT-SERV-MIB", "intSrvIfAttribFlows"), ("INT-SERV-MIB", "intSrvIfAttribPropagationDelay"), ("INT-SERV-MIB", "intSrvIfAttribStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvIfAttribGroup = intSrvIfAttribGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvIfAttribGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvFlowsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 2)).setObjects(("INT-SERV-MIB", "intSrvFlowType"), ("INT-SERV-MIB", "intSrvFlowOwner"), ("INT-SERV-MIB", "intSrvFlowDestAddr"), ("INT-SERV-MIB", "intSrvFlowSenderAddr"), ("INT-SERV-MIB", "intSrvFlowDestAddrLength"), ("INT-SERV-MIB", "intSrvFlowSenderAddrLength"), ("INT-SERV-MIB", "intSrvFlowProtocol"), ("INT-SERV-MIB", "intSrvFlowDestPort"), ("INT-SERV-MIB", "intSrvFlowPort"), ("INT-SERV-MIB", "intSrvFlowFlowId"), ("INT-SERV-MIB", "intSrvFlowInterface"), ("INT-SERV-MIB", "intSrvFlowBestEffort"), ("INT-SERV-MIB", "intSrvFlowRate"), ("INT-SERV-MIB", "intSrvFlowBurst"), ("INT-SERV-MIB", "intSrvFlowWeight"), ("INT-SERV-MIB", "intSrvFlowQueue"), ("INT-SERV-MIB", "intSrvFlowMinTU"), ("INT-SERV-MIB", "intSrvFlowMaxTU"), ("INT-SERV-MIB", "intSrvFlowDiscard"), ("INT-SERV-MIB", "intSrvFlowPoliced"), ("INT-SERV-MIB", "intSrvFlowService"), ("INT-SERV-MIB", "intSrvFlowIfAddr"), ("INT-SERV-MIB", "intSrvFlowOrder"), ("INT-SERV-MIB", "intSrvFlowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvFlowsGroup = intSrvFlowsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvFlowsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
intSrvGenObjectsGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 52, 4, 1, 3)).setObjects(("INT-SERV-MIB", "intSrvFlowNewIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
intSrvGenObjectsGroup = intSrvGenObjectsGroup.setStatus('current')
if mibBuilder.loadTexts: intSrvGenObjectsGroup.setDescription('These objects are required for Systems sup- porting the Integrated Services Architecture.')
mibBuilder.exportSymbols("INT-SERV-MIB", BitRate=BitRate, intSrvIfAttribAllocatedBits=intSrvIfAttribAllocatedBits, intSrvFlowMaxTU=intSrvFlowMaxTU, intSrvFlowOrder=intSrvFlowOrder, PYSNMP_MODULE_ID=intSrv, Protocol=Protocol, intSrvIfAttribAllocatedBuffer=intSrvIfAttribAllocatedBuffer, intSrvFlowDestAddr=intSrvFlowDestAddr, intSrvFlowBurst=intSrvFlowBurst, intSrvIfAttribFlows=intSrvIfAttribFlows, intSrvFlowTable=intSrvFlowTable, intSrvFlowEntry=intSrvFlowEntry, intSrvFlowSenderAddrLength=intSrvFlowSenderAddrLength, intSrvIfAttribGroup=intSrvIfAttribGroup, intSrvFlowInterface=intSrvFlowInterface, intSrvFlowDestAddrLength=intSrvFlowDestAddrLength, intSrvFlowDestPort=intSrvFlowDestPort, BurstSize=BurstSize, intSrvFlowStatus=intSrvFlowStatus, intSrvIfAttribMaxAllocatedBits=intSrvIfAttribMaxAllocatedBits, intSrvFlowNewIndex=intSrvFlowNewIndex, intSrvGroups=intSrvGroups, MessageSize=MessageSize, intSrvFlowRate=intSrvFlowRate, intSrvFlowPort=intSrvFlowPort, intSrvFlowIfAddr=intSrvFlowIfAddr, SessionType=SessionType, intSrvIfAttribTable=intSrvIfAttribTable, intSrvIfAttribPropagationDelay=intSrvIfAttribPropagationDelay, intSrvFlowService=intSrvFlowService, intSrvFlowsGroup=intSrvFlowsGroup, intSrvFlowWeight=intSrvFlowWeight, intSrvFlowMinTU=intSrvFlowMinTU, intSrvFlowProtocol=intSrvFlowProtocol, intSrvFlowOwner=intSrvFlowOwner, intSrvIfAttribEntry=intSrvIfAttribEntry, intSrvFlowSenderAddr=intSrvFlowSenderAddr, QosService=QosService, SessionNumber=SessionNumber, intSrvObjects=intSrvObjects, intSrvGenObjects=intSrvGenObjects, intSrvFlowFlowId=intSrvFlowFlowId, intSrvCompliances=intSrvCompliances, intSrv=intSrv, intSrvFlowNumber=intSrvFlowNumber, intSrvNotifications=intSrvNotifications, intSrvFlowQueue=intSrvFlowQueue, intSrvFlowBestEffort=intSrvFlowBestEffort, intSrvFlowType=intSrvFlowType, intSrvCompliance=intSrvCompliance, Port=Port, intSrvIfAttribStatus=intSrvIfAttribStatus, intSrvFlowPoliced=intSrvFlowPoliced, intSrvFlowDiscard=intSrvFlowDiscard, intSrvGenObjectsGroup=intSrvGenObjectsGroup, intSrvConformance=intSrvConformance)
| 129.878788 | 2,054 | 0.780876 | ["Apache-2.0"] | agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/INT-SERV-MIB.py | 25,716 | Python |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from six import iteritems, string_types
from elasticsearch7.helpers import scan
from elasticsearch7.exceptions import TransportError
from .query import Q, Bool
from .aggs import A, AggBase
from .utils import DslBase, AttrDict
from .response import Response, Hit
from .connections import get_connection
from .exceptions import IllegalOperation
class QueryProxy(object):
"""
Simple proxy around DSL objects (queries) that can be called
(to add query/post_filter) and also allows attribute access which is proxied to
the wrapped query.
"""
def __init__(self, search, attr_name):
self._search = search
self._proxied = None
self._attr_name = attr_name
def __nonzero__(self):
return self._proxied is not None
__bool__ = __nonzero__
def __call__(self, *args, **kwargs):
s = self._search._clone()
# we cannot use self._proxied since we just cloned self._search and
# need to access the new self on the clone
proxied = getattr(s, self._attr_name)
if proxied._proxied is None:
proxied._proxied = Q(*args, **kwargs)
else:
proxied._proxied &= Q(*args, **kwargs)
# always return search to be chainable
return s
def __getattr__(self, attr_name):
return getattr(self._proxied, attr_name)
def __setattr__(self, attr_name, value):
if not attr_name.startswith("_"):
self._proxied = Q(self._proxied.to_dict())
setattr(self._proxied, attr_name, value)
super(QueryProxy, self).__setattr__(attr_name, value)
def __getstate__(self):
return self._search, self._proxied, self._attr_name
def __setstate__(self, state):
self._search, self._proxied, self._attr_name = state
class ProxyDescriptor(object):
"""
Simple descriptor to enable setting of queries and filters as:
s = Search()
s.query = Q(...)
"""
def __init__(self, name):
self._attr_name = "_%s_proxy" % name
def __get__(self, instance, owner):
return getattr(instance, self._attr_name)
def __set__(self, instance, value):
proxy = getattr(instance, self._attr_name)
proxy._proxied = Q(value)
class AggsProxy(AggBase, DslBase):
name = "aggs"
def __init__(self, search):
self._base = self
self._search = search
self._params = {"aggs": {}}
def to_dict(self):
return super(AggsProxy, self).to_dict().get("aggs", {})
class Request(object):
def __init__(self, using="default", index=None, doc_type=None, extra=None):
self._using = using
self._index = None
if isinstance(index, (tuple, list)):
self._index = list(index)
elif index:
self._index = [index]
self._doc_type = []
self._doc_type_map = {}
if isinstance(doc_type, (tuple, list)):
self._doc_type.extend(doc_type)
elif isinstance(doc_type, collections_abc.Mapping):
self._doc_type.extend(doc_type.keys())
self._doc_type_map.update(doc_type)
elif doc_type:
self._doc_type.append(doc_type)
self._params = {}
self._extra = extra or {}
def __eq__(self, other):
return (
isinstance(other, Request)
and other._params == self._params
and other._index == self._index
and other._doc_type == self._doc_type
and other.to_dict() == self.to_dict()
)
def __copy__(self):
return self._clone()
def params(self, **kwargs):
"""
Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local')
"""
s = self._clone()
s._params.update(kwargs)
return s
def index(self, *index):
"""
Set the index for the search. If called empty it will remove all information.
Example:
s = Search()
s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')
s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
"""
# .index() resets
s = self._clone()
if not index:
s._index = None
else:
indexes = []
for i in index:
if isinstance(i, string_types):
indexes.append(i)
elif isinstance(i, list):
indexes += i
elif isinstance(i, tuple):
indexes += list(i)
s._index = (self._index or []) + indexes
return s
def _resolve_field(self, path):
for dt in self._doc_type:
if not hasattr(dt, "_index"):
continue
field = dt._index.resolve_field(path)
if field is not None:
return field
def _resolve_nested(self, hit, parent_class=None):
doc_class = Hit
nested_path = []
nesting = hit["_nested"]
while nesting and "field" in nesting:
nested_path.append(nesting["field"])
nesting = nesting.get("_nested")
nested_path = ".".join(nested_path)
if hasattr(parent_class, "_index"):
nested_field = parent_class._index.resolve_field(nested_path)
else:
nested_field = self._resolve_field(nested_path)
if nested_field is not None:
return nested_field._doc_class
return doc_class
def _get_result(self, hit, parent_class=None):
doc_class = Hit
dt = hit.get("_type")
if "_nested" in hit:
doc_class = self._resolve_nested(hit, parent_class)
elif dt in self._doc_type_map:
doc_class = self._doc_type_map[dt]
else:
for doc_type in self._doc_type:
if hasattr(doc_type, "_matches") and doc_type._matches(hit):
doc_class = doc_type
break
for t in hit.get("inner_hits", ()):
hit["inner_hits"][t] = Response(
self, hit["inner_hits"][t], doc_class=doc_class
)
callback = getattr(doc_class, "from_es", doc_class)
return callback(hit)
def doc_type(self, *doc_type, **kwargs):
"""
Set the type to search through. You can supply a single value or
multiple. Values can be strings or subclasses of ``Document``.
You can also pass in any keyword arguments, mapping a doc_type to a
callback that should be used instead of the Hit class.
If no doc_type is supplied any information stored on the instance will
be erased.
Example:
s = Search().doc_type('product', 'store', User, custom=my_callback)
"""
# .doc_type() resets
s = self._clone()
if not doc_type and not kwargs:
s._doc_type = []
s._doc_type_map = {}
else:
s._doc_type.extend(doc_type)
s._doc_type.extend(kwargs.keys())
s._doc_type_map.update(kwargs)
return s
def using(self, client):
"""
Associate the search request with an elasticsearch client. A fresh copy
will be returned with current instance remaining unchanged.
:arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
an alias to look up in ``elasticsearch_dsl.connections``
"""
s = self._clone()
s._using = client
return s
def extra(self, **kwargs):
"""
Add extra keys to the request body. Mostly here for backwards
compatibility.
"""
s = self._clone()
if "from_" in kwargs:
kwargs["from"] = kwargs.pop("from_")
s._extra.update(kwargs)
return s
def _clone(self):
s = self.__class__(
using=self._using, index=self._index, doc_type=self._doc_type
)
s._doc_type_map = self._doc_type_map.copy()
s._extra = self._extra.copy()
s._params = self._params.copy()
return s
class Search(Request):
query = ProxyDescriptor("query")
post_filter = ProxyDescriptor("post_filter")
def __init__(self, **kwargs):
"""
Search request to elasticsearch.
:arg using: `Elasticsearch` instance to use
:arg index: limit the search to index
:arg doc_type: only query this type.
All the parameters supplied (or omitted) at creation type can be later
overridden by methods (`using`, `index` and `doc_type` respectively).
"""
super(Search, self).__init__(**kwargs)
self.aggs = AggsProxy(self)
self._sort = []
self._source = None
self._highlight = {}
self._highlight_opts = {}
self._suggest = {}
self._script_fields = {}
self._response_class = Response
self._query_proxy = QueryProxy(self, "query")
self._post_filter_proxy = QueryProxy(self, "post_filter")
def filter(self, *args, **kwargs):
return self.query(Bool(filter=[Q(*args, **kwargs)]))
def exclude(self, *args, **kwargs):
return self.query(Bool(filter=[~Q(*args, **kwargs)]))
def __iter__(self):
"""
Iterate over the hits.
"""
return iter(self.execute())
def __getitem__(self, n):
"""
Support slicing the `Search` instance for pagination.
Slicing equates to the from/size parameters. E.g.::
s = Search().query(...)[0:25]
is equivalent to::
s = Search().query(...).extra(from_=0, size=25)
"""
s = self._clone()
if isinstance(n, slice):
# If negative slicing, abort.
if n.start and n.start < 0 or n.stop and n.stop < 0:
raise ValueError("Search does not support negative slicing.")
# Elasticsearch won't get all results so we default to size: 10 if
# stop not given.
s._extra["from"] = n.start or 0
s._extra["size"] = max(
0, n.stop - (n.start or 0) if n.stop is not None else 10
)
return s
else: # This is an index lookup, equivalent to slicing by [n:n+1].
# If negative index, abort.
if n < 0:
raise ValueError("Search does not support negative indexing.")
s._extra["from"] = n
s._extra["size"] = 1
return s
@classmethod
def from_dict(cls, d):
"""
Construct a new `Search` instance from a raw dict containing the search
body. Useful when migrating from raw dictionaries.
Example::
s = Search.from_dict({
"query": {
"bool": {
"must": [...]
}
},
"aggs": {...}
})
s = s.filter('term', published=True)
"""
s = cls()
s.update_from_dict(d)
return s
def _clone(self):
"""
Return a clone of the current search request. Performs a shallow copy
of all the underlying objects. Used internally by most state modifying
APIs.
"""
s = super(Search, self)._clone()
s._response_class = self._response_class
s._sort = self._sort[:]
s._source = copy.copy(self._source) if self._source is not None else None
s._highlight = self._highlight.copy()
s._highlight_opts = self._highlight_opts.copy()
s._suggest = self._suggest.copy()
s._script_fields = self._script_fields.copy()
for x in ("query", "post_filter"):
getattr(s, x)._proxied = getattr(self, x)._proxied
# copy top-level bucket definitions
if self.aggs._params.get("aggs"):
s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
return s
def response_class(self, cls):
"""
Override the default wrapper used for the response.
"""
s = self._clone()
s._response_class = cls
return s
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if "query" in d:
self.query._proxied = Q(d.pop("query"))
if "post_filter" in d:
self.post_filter._proxied = Q(d.pop("post_filter"))
aggs = d.pop("aggs", d.pop("aggregations", {}))
if aggs:
self.aggs._params = {
"aggs": {name: A(value) for (name, value) in iteritems(aggs)}
}
if "sort" in d:
self._sort = d.pop("sort")
if "_source" in d:
self._source = d.pop("_source")
if "highlight" in d:
high = d.pop("highlight").copy()
self._highlight = high.pop("fields")
self._highlight_opts = high
if "suggest" in d:
self._suggest = d.pop("suggest")
if "text" in self._suggest:
text = self._suggest.pop("text")
for s in self._suggest.values():
s.setdefault("text", text)
if "script_fields" in d:
self._script_fields = d.pop("script_fields")
self._extra.update(d)
return self
def script_fields(self, **kwargs):
"""
Define script fields to be calculated on hits. See
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html
for more details.
Example::
s = Search()
s = s.script_fields(times_two="doc['field'].value * 2")
s = s.script_fields(
times_three={
'script': {
'inline': "doc['field'].value * params.n",
'params': {'n': 3}
}
}
)
"""
s = self._clone()
for name in kwargs:
if isinstance(kwargs[name], string_types):
kwargs[name] = {"script": kwargs[name]}
s._script_fields.update(kwargs)
return s
def source(self, fields=None, **kwargs):
"""
Selectively control how the _source field is returned.
:arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes
If ``fields`` is None, the entire document will be returned for
each hit. If fields is a dictionary with keys of 'includes' and/or
'excludes' the fields will be either included or excluded appropriately.
Calling this multiple times with the same named parameter will override the
previous values with the new ones.
Example::
s = Search()
s = s.source(includes=['obj1.*'], excludes=["*.description"])
s = Search()
s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
"""
s = self._clone()
if fields and kwargs:
raise ValueError("You cannot specify fields and kwargs at the same time.")
if fields is not None:
s._source = fields
return s
if kwargs and not isinstance(s._source, dict):
s._source = {}
for key, value in kwargs.items():
if value is None:
try:
del s._source[key]
except KeyError:
pass
else:
s._source[key] = value
return s
def sort(self, *keys):
"""
Add sorting information to the search request. If called without
arguments it will remove all sort requirements. Otherwise it will
replace them. Acceptable arguments are::
'some.field'
'-some.other.field'
{'different.field': {'any': 'dict'}}
so for example::
s = Search().sort(
'category',
'-title',
{"price" : {"order" : "asc", "mode" : "avg"}}
)
will sort by ``category``, ``title`` (in descending order) and
``price`` in ascending order using the ``avg`` mode.
The API returns a copy of the Search object and can thus be chained.
"""
s = self._clone()
s._sort = []
for k in keys:
if isinstance(k, string_types) and k.startswith("-"):
if k[1:] == "_score":
raise IllegalOperation("Sorting by `-_score` is not allowed.")
k = {k[1:]: {"order": "desc"}}
s._sort.append(k)
return s
def highlight_options(self, **kwargs):
"""
Update the global highlighting options used for this request. For
example::
s = Search()
s = s.highlight_options(order='score')
"""
s = self._clone()
s._highlight_opts.update(kwargs)
return s
def highlight(self, *fields, **kwargs):
"""
Request highlighting of some fields. All keyword arguments passed in will be
used as parameters for all the fields in the ``fields`` parameter. Example::
Search().highlight('title', 'body', fragment_size=50)
will produce the equivalent of::
{
"highlight": {
"fields": {
"body": {"fragment_size": 50},
"title": {"fragment_size": 50}
}
}
}
If you want to have different options for different fields
you can call ``highlight`` twice::
Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)
which will produce::
{
"highlight": {
"fields": {
"body": {"fragment_size": 100},
"title": {"fragment_size": 50}
}
}
}
"""
s = self._clone()
for f in fields:
s._highlight[f] = kwargs
return s
def suggest(self, name, text, **kwargs):
"""
Add a suggestions request to the search.
:arg name: name of the suggestion
:arg text: text to suggest on
All keyword arguments will be added to the suggestions body. For example::
s = Search()
s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
"""
s = self._clone()
s._suggest[name] = {"text": text}
s._suggest[name].update(kwargs)
return s
def to_dict(self, count=False, **kwargs):
"""
Serialize the search into the dictionary that will be sent over as the
request's body.
:arg count: a flag to specify if we are interested in a body for count -
no aggregations, no pagination bounds etc.
All additional keyword arguments will be included into the dictionary.
"""
d = {}
if self.query:
d["query"] = self.query.to_dict()
# count request doesn't care for sorting and other things
if not count:
if self.post_filter:
d["post_filter"] = self.post_filter.to_dict()
if self.aggs.aggs:
d.update(self.aggs.to_dict())
if self._sort:
d["sort"] = self._sort
d.update(self._extra)
if self._source not in (None, {}):
d["_source"] = self._source
if self._highlight:
d["highlight"] = {"fields": self._highlight}
d["highlight"].update(self._highlight_opts)
if self._suggest:
d["suggest"] = self._suggest
if self._script_fields:
d["script_fields"] = self._script_fields
d.update(kwargs)
return d
def count(self):
"""
Return the number of hits matching the query and filters. Note that
only the actual number is returned.
"""
if hasattr(self, "_response") and self._response.hits.total.relation == "eq":
return self._response.hits.total.value
es = get_connection(self._using)
d = self.to_dict(count=True)
# TODO: failed shards detection
return es.count(index=self._index, body=d, **self._params)["count"]
def execute(self, ignore_cache=False):
"""
Execute the search and return an instance of ``Response`` wrapping all
the data.
:arg ignore_cache: if set to ``True``, consecutive calls will hit
ES, while cached result will be ignored. Defaults to `False`
"""
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
self._response = self._response_class(
self, es.search(index=self._index, body=self.to_dict(), **self._params)
)
return self._response
def scan(self):
"""
Turn the search into a scan search and return a generator that will
iterate over all the documents matching the query.
        Use ``params`` method to specify any additional arguments you wish to
pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
"""
es = get_connection(self._using)
for hit in scan(es, query=self.to_dict(), index=self._index, **self._params):
yield self._get_result(hit)
def delete(self):
"""
delete() executes the query by delegating to delete_by_query()
"""
es = get_connection(self._using)
return AttrDict(
es.delete_by_query(index=self._index, body=self.to_dict(), **self._params)
)
class MultiSearch(Request):
"""
Combine multiple :class:`~elasticsearch_dsl.Search` objects into a single
request.
"""
def __init__(self, **kwargs):
super(MultiSearch, self).__init__(**kwargs)
self._searches = []
def __getitem__(self, key):
return self._searches[key]
def __iter__(self):
return iter(self._searches)
def _clone(self):
ms = super(MultiSearch, self)._clone()
ms._searches = self._searches[:]
return ms
def add(self, search):
"""
Adds a new :class:`~elasticsearch_dsl.Search` object to the request::
ms = MultiSearch(index='my-index')
ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
ms = ms.add(Search(doc_type=Blog))
"""
ms = self._clone()
ms._searches.append(search)
return ms
def to_dict(self):
out = []
for s in self._searches:
meta = {}
if s._index:
meta["index"] = s._index
meta.update(s._params)
out.append(meta)
out.append(s.to_dict())
return out
def execute(self, ignore_cache=False, raise_on_error=True):
"""
Execute the multi search request and return a list of search results.
"""
if ignore_cache or not hasattr(self, "_response"):
es = get_connection(self._using)
responses = es.msearch(
index=self._index, body=self.to_dict(), **self._params
)
out = []
for s, r in zip(self._searches, responses["responses"]):
if r.get("error", False):
if raise_on_error:
raise TransportError("N/A", r["error"]["type"], r["error"])
r = None
else:
r = Response(s, r)
out.append(r)
self._response = out
return self._response
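# --- Hedged usage sketch (editorial addition, not part of the library). ---
# It exercises the fluent API defined above; the index and field names are made up,
# and a cluster must already be registered under the "default" connection alias
# (e.g. via elasticsearch_dsl.connections.create_connection()).
if __name__ == "__main__":
    s = (
        Search(index="blog-posts")
        .filter("term", published=True)   # wrapped into a Bool filter context
        .query("match", title="python")
        .sort("-publish_date")[:10]       # from/size pagination via slicing
    )
    s.aggs.bucket("per_author", "terms", field="author.keyword")
    response = s.execute()
    for hit in response:
        print(hit.meta.score, hit.title)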
| 31.104167 | 105 | 0.560065 | ["Apache-2.0"] | cfpb/elasticsearch-dsl-py | elasticsearch_dsl/search.py | 25,381 | Python |
"""Test the code generating time series with the order totals.
Unless otherwise noted, each `time_step` is 60 minutes long implying
12 time steps per day (i.e., we use `LONG_TIME_STEP` by default).
"""
import datetime
import pandas as pd
import pytest
from tests import config as test_config
from urban_meal_delivery import config
@pytest.fixture
def good_predict_at():
"""A `predict_at` within `START`-`END` and ...
... a long enough history so that either `SHORT_TRAIN_HORIZON`
or `LONG_TRAIN_HORIZON` works.
"""
return datetime.datetime(
test_config.END.year,
test_config.END.month,
test_config.END.day,
test_config.NOON,
0,
)
@pytest.fixture
def bad_predict_at():
"""A `predict_at` within `START`-`END` but ...
... not a long enough history so that both `SHORT_TRAIN_HORIZON`
and `LONG_TRAIN_HORIZON` do not work.
"""
predict_day = test_config.END - datetime.timedelta(weeks=6, days=1)
return datetime.datetime(
predict_day.year, predict_day.month, predict_day.day, test_config.NOON, 0,
)
class TestMakeHorizontalTimeSeries:
"""Test the `OrderHistory.make_horizontal_ts()` method."""
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_wrong_pixel(self, order_history, good_predict_at, train_horizon):
"""A `pixel_id` that is not in the `grid`."""
with pytest.raises(LookupError):
order_history.make_horizontal_ts(
pixel_id=999_999,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_are_series(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The time series come as a `pd.Series`."""
result = order_history.make_horizontal_ts(
pixel_id=good_pixel_id,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
assert isinstance(training_ts, pd.Series)
assert training_ts.name == 'n_orders'
assert isinstance(actuals_ts, pd.Series)
assert actuals_ts.name == 'n_orders'
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_have_correct_length(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The length of a training time series must be a multiple of `7` ...
... whereas the time series with the actual order counts has only `1` value.
"""
result = order_history.make_horizontal_ts(
pixel_id=good_pixel_id,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
assert len(training_ts) == 7 * train_horizon
assert len(actuals_ts) == 1
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_frequency_is_number_of_weekdays(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The `frequency` must be `7`."""
result = order_history.make_horizontal_ts(
pixel_id=good_pixel_id,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
_, frequency, _ = result # noqa:WPS434
assert frequency == 7
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_no_long_enough_history1(
self, order_history, good_pixel_id, bad_predict_at, train_horizon,
):
"""If the `predict_at` day is too early in the `START`-`END` horizon ...
... the history of order totals is not long enough.
"""
with pytest.raises(RuntimeError):
order_history.make_horizontal_ts(
pixel_id=good_pixel_id,
predict_at=bad_predict_at,
train_horizon=train_horizon,
)
def test_no_long_enough_history2(
self, order_history, good_pixel_id, good_predict_at,
):
"""If the `train_horizon` is longer than the `START`-`END` horizon ...
... the history of order totals can never be long enough.
"""
with pytest.raises(RuntimeError):
order_history.make_horizontal_ts(
pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=999,
)
class TestMakeVerticalTimeSeries:
"""Test the `OrderHistory.make_vertical_ts()` method."""
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_wrong_pixel(self, order_history, good_predict_at, train_horizon):
"""A `pixel_id` that is not in the `grid`."""
with pytest.raises(LookupError):
order_history.make_vertical_ts(
pixel_id=999_999,
predict_day=good_predict_at.date(),
train_horizon=train_horizon,
)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_are_series(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The time series come as `pd.Series`."""
result = order_history.make_vertical_ts(
pixel_id=good_pixel_id,
predict_day=good_predict_at.date(),
train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
assert isinstance(training_ts, pd.Series)
assert training_ts.name == 'n_orders'
assert isinstance(actuals_ts, pd.Series)
assert actuals_ts.name == 'n_orders'
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_have_correct_length(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The length of a training time series is the product of the ...
... weekly time steps (i.e., product of `7` and the number of daily time steps)
and the `train_horizon` in weeks.
The time series with the actual order counts always holds one observation
per time step of a day.
"""
result = order_history.make_vertical_ts(
pixel_id=good_pixel_id,
predict_day=good_predict_at.date(),
train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
n_daily_time_steps = (
60
* (config.SERVICE_END - config.SERVICE_START)
// test_config.LONG_TIME_STEP
)
assert len(training_ts) == 7 * n_daily_time_steps * train_horizon
assert len(actuals_ts) == n_daily_time_steps
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_frequency_is_number_of_weekly_time_steps(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The `frequency` is the number of weekly time steps."""
result = order_history.make_vertical_ts(
pixel_id=good_pixel_id,
predict_day=good_predict_at.date(),
train_horizon=train_horizon,
)
_, frequency, _ = result # noqa:WPS434
n_daily_time_steps = (
60
* (config.SERVICE_END - config.SERVICE_START)
// test_config.LONG_TIME_STEP
)
assert frequency == 7 * n_daily_time_steps
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_no_long_enough_history1(
self, order_history, good_pixel_id, bad_predict_at, train_horizon,
):
"""If the `predict_at` day is too early in the `START`-`END` horizon ...
... the history of order totals is not long enough.
"""
with pytest.raises(RuntimeError):
order_history.make_vertical_ts(
pixel_id=good_pixel_id,
predict_day=bad_predict_at.date(),
train_horizon=train_horizon,
)
def test_no_long_enough_history2(
self, order_history, good_pixel_id, good_predict_at,
):
"""If the `train_horizon` is longer than the `START`-`END` horizon ...
... the history of order totals can never be long enough.
"""
with pytest.raises(RuntimeError):
order_history.make_vertical_ts(
pixel_id=good_pixel_id,
predict_day=good_predict_at.date(),
train_horizon=999,
)
class TestMakeRealTimeTimeSeries:
"""Test the `OrderHistory.make_realtime_ts()` method."""
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_wrong_pixel(self, order_history, good_predict_at, train_horizon):
"""A `pixel_id` that is not in the `grid`."""
with pytest.raises(LookupError):
order_history.make_realtime_ts(
pixel_id=999_999,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_are_series(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The time series come as `pd.Series`."""
result = order_history.make_realtime_ts(
pixel_id=good_pixel_id,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
assert isinstance(training_ts, pd.Series)
assert training_ts.name == 'n_orders'
assert isinstance(actuals_ts, pd.Series)
assert actuals_ts.name == 'n_orders'
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_have_correct_length1(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The length of a training time series is the product of the ...
... weekly time steps (i.e., product of `7` and the number of daily time steps)
and the `train_horizon` in weeks; however, this assertion only holds if
we predict the first `time_step` of the day.
The time series with the actual order counts always holds `1` value.
"""
predict_at = datetime.datetime(
good_predict_at.year,
good_predict_at.month,
good_predict_at.day,
config.SERVICE_START,
0,
)
result = order_history.make_realtime_ts(
pixel_id=good_pixel_id, predict_at=predict_at, train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
n_daily_time_steps = (
60
* (config.SERVICE_END - config.SERVICE_START)
// test_config.LONG_TIME_STEP
)
assert len(training_ts) == 7 * n_daily_time_steps * train_horizon
assert len(actuals_ts) == 1
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_time_series_have_correct_length2(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The length of a training time series is the product of the ...
... weekly time steps (i.e., product of `7` and the number of daily time steps)
and the `train_horizon` in weeks; however, this assertion only holds if
we predict the first `time_step` of the day. Predicting any other `time_step`
means that the training time series becomes longer by the number of time steps
before the one being predicted.
The time series with the actual order counts always holds `1` value.
"""
assert good_predict_at.hour == test_config.NOON
result = order_history.make_realtime_ts(
pixel_id=good_pixel_id,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
training_ts, _, actuals_ts = result
n_daily_time_steps = (
60
* (config.SERVICE_END - config.SERVICE_START)
// test_config.LONG_TIME_STEP
)
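        # Time steps already elapsed on the prediction day (from service start up
        # to noon); these are appended to the weekly training horizon below.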
n_time_steps_before = (
60 * (test_config.NOON - config.SERVICE_START) // test_config.LONG_TIME_STEP
)
assert (
len(training_ts)
== 7 * n_daily_time_steps * train_horizon + n_time_steps_before
)
assert len(actuals_ts) == 1
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
    def test_frequency_is_number_of_weekly_time_steps(
self, order_history, good_pixel_id, good_predict_at, train_horizon,
):
"""The `frequency` is the number of weekly time steps."""
result = order_history.make_realtime_ts(
pixel_id=good_pixel_id,
predict_at=good_predict_at,
train_horizon=train_horizon,
)
_, frequency, _ = result # noqa:WPS434
n_daily_time_steps = (
60
* (config.SERVICE_END - config.SERVICE_START)
// test_config.LONG_TIME_STEP
)
assert frequency == 7 * n_daily_time_steps
@pytest.mark.parametrize('train_horizon', test_config.TRAIN_HORIZONS)
def test_no_long_enough_history1(
self, order_history, good_pixel_id, bad_predict_at, train_horizon,
):
"""If the `predict_at` day is too early in the `START`-`END` horizon ...
... the history of order totals is not long enough.
"""
with pytest.raises(RuntimeError):
order_history.make_realtime_ts(
pixel_id=good_pixel_id,
predict_at=bad_predict_at,
train_horizon=train_horizon,
)
def test_no_long_enough_history2(
self, order_history, good_pixel_id, good_predict_at,
):
"""If the `train_horizon` is longer than the `START`-`END` horizon ...
... the history of order totals can never be long enough.
"""
with pytest.raises(RuntimeError):
order_history.make_realtime_ts(
pixel_id=good_pixel_id, predict_at=good_predict_at, train_horizon=999,
)
| 35.6425 | 88 | 0.644666 | [
"MIT"
] | webartifex/urban-meal-delivery | tests/forecasts/timify/test_make_time_series.py | 14,257 | Python |
"""
This file is part of the FJournal Project.
Copyright © 2019-2020, Daniele Penazzo. All Rights Reserved.
The use of this code is governed by the MIT license attached.
See the LICENSE file for the full license.
Created on: 2020-07-10
Author: Penaz
"""
from tkinter import ttk
import tkinter as tk
from models import Meal
class AddMealPopup(ttk.Frame):
"""
Defines a popup for adding meals
"""
def __init__(self, master=None, session=None):
"""
Constructor of the class
"""
super().__init__(master)
self.master = master
self.grid(row=0, column=0)
self.session = session
self.mealname = tk.StringVar()
self.create_widgets()
def create_widgets(self):
"""
Creates the widgets for the popup
"""
self.meallbl = ttk.Label(self, text="Meal Name")
self.meallbl.grid(row=0, column=0)
self.mealinput = ttk.Entry(self, textvariable=self.mealname)
self.mealinput.grid(row=0, column=1)
self.addbtn = ttk.Button(self,
text="Confirm",
command=self.add_meal)
self.addbtn.grid(row=1, column=0, columnspan=2)
def add_meal(self):
"""
Opens the Add Meal popup
"""
meal = Meal(name=self.mealname.get())
self.session.add(meal)
self.session.commit()
self.master.destroy()
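# Minimal usage sketch (illustrative only; assumes a Tk root window and a
# SQLAlchemy session named `session` created elsewhere, e.g. via sessionmaker):
#
#     root = tk.Tk()
#     popup = tk.Toplevel(root)
#     AddMealPopup(master=popup, session=session)
#     root.mainloop()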
| 27.301887 | 68 | 0.595715 | [
"MIT"
] | Penaz91/fjournal | gui/addmealpopup.py | 1,448 | Python |
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
#
# Adafruit PCF8523 RTC Library documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 11 21:37:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.intersphinx"]
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["adafruit_bus_device", "adafruit_register"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit PCF8523 RTC Library"
copyright = "2016, Philip Moyer"
author = "Philip Moyer"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Adafruit PCF8523 RTC Library v1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# htmlhelp_basename = 'AdafruitPCF8523RTCLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitPCF8523RTCLibrary.tex",
"Adafruit PCF8523 RTC Library Documentation",
"Philip Moyer",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitPCF8523RTCLibrary23rtclibrary",
"Adafruit PCF8523 RTC Library Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitPCF8523RTCLibrary",
"Adafruit PCF8523 RTC Library Documentation",
author,
"AdafruitPCF8523RTCLibrary",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"CircuitPython": ("https://docs.circuitpython.org/en/latest/", None),
}
| 29.598945 | 85 | 0.700214 | [
"MIT",
"MIT-0",
"Unlicense"
] | adafruit/Adafruit_MicroPython_PCF8523 | docs/conf.py | 11,218 | Python |
import unittest
import numpy
import pytest
import cupy
import cupy.core._accelerator as _acc
from cupy.core import _cub_reduction
from cupy import testing
@testing.gpu
class TestSearch(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.argmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_argmax_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.argmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
def test_argmax_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmax()
@testing.for_all_dtypes(no_complex=True)
def test_argmax_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return a.argmax(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_argmin_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.argmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.argmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_tie(self, xp, dtype):
a = xp.array([0, 1, 2, 3, 0, 5], dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
def test_argmin_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
def test_argmin_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return a.argmin(axis=1)
# This class compares CUB results against NumPy's
# TODO(leofang): test axis after support is added
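# The 'device' backend routes whole-array reductions through CUB's
# device_reduce (hence axis-wise cases are skipped in setUp), while the 'block'
# backend uses the block-level CUB reduction kernels, which also cover axis
# reductions.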
@testing.parameterize(*testing.product({
'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
'order_and_axis': (('C', -1), ('C', None), ('F', 0), ('F', None)),
'backend': ('device', 'block'),
}))
@testing.gpu
@unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled')
class TestCubReduction(unittest.TestCase):
def setUp(self):
self.order, self.axis = self.order_and_axis
self.old_routine_accelerators = _acc.get_routine_accelerators()
self.old_reduction_accelerators = _acc.get_reduction_accelerators()
if self.backend == 'device':
if self.axis is not None:
raise unittest.SkipTest('does not support')
_acc.set_routine_accelerators(['cub'])
_acc.set_reduction_accelerators([])
elif self.backend == 'block':
_acc.set_routine_accelerators([])
_acc.set_reduction_accelerators(['cub'])
def tearDown(self):
_acc.set_routine_accelerators(self.old_routine_accelerators)
_acc.set_reduction_accelerators(self.old_reduction_accelerators)
@testing.for_dtypes('bhilBHILefdFD')
@testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
def test_cub_argmin(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.order == 'C':
a = xp.ascontiguousarray(a)
else:
a = xp.asfortranarray(a)
if xp is numpy:
return a.argmin(axis=self.axis)
# xp is cupy, first ensure we really use CUB
ret = cupy.empty(()) # Cython checks return type, need to fool it
if self.backend == 'device':
func_name = 'cupy.core._routines_statistics.cub.'
func_name += 'device_reduce'
with testing.AssertFunctionIsCalled(func_name, return_value=ret):
a.argmin(axis=self.axis)
elif self.backend == 'block':
# this is the only function we can mock; the rest is cdef'd
func_name = 'cupy.core._cub_reduction.'
func_name += '_SimpleCubReductionKernel_get_cached_function'
func = _cub_reduction._SimpleCubReductionKernel_get_cached_function
if self.axis is not None and len(self.shape) > 1:
times_called = 1 # one pass
else:
times_called = 2 # two passes
with testing.AssertFunctionIsCalled(
func_name, wraps=func, times_called=times_called):
a.argmin(axis=self.axis)
# ...then perform the actual computation
return a.argmin(axis=self.axis)
@testing.for_dtypes('bhilBHILefdFD')
@testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
def test_cub_argmax(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.order == 'C':
a = xp.ascontiguousarray(a)
else:
a = xp.asfortranarray(a)
if xp is numpy:
return a.argmax(axis=self.axis)
# xp is cupy, first ensure we really use CUB
ret = cupy.empty(()) # Cython checks return type, need to fool it
if self.backend == 'device':
func_name = 'cupy.core._routines_statistics.cub.'
func_name += 'device_reduce'
with testing.AssertFunctionIsCalled(func_name, return_value=ret):
a.argmax(axis=self.axis)
elif self.backend == 'block':
# this is the only function we can mock; the rest is cdef'd
func_name = 'cupy.core._cub_reduction.'
func_name += '_SimpleCubReductionKernel_get_cached_function'
func = _cub_reduction._SimpleCubReductionKernel_get_cached_function
if self.axis is not None and len(self.shape) > 1:
times_called = 1 # one pass
else:
times_called = 2 # two passes
with testing.AssertFunctionIsCalled(
func_name, wraps=func, times_called=times_called):
a.argmax(axis=self.axis)
# ...then perform the actual computation
return a.argmax(axis=self.axis)
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['argmin', 'argmax'],
'is_module': [True, False],
'shape': [(3, 4), ()],
}))
class TestArgMinMaxDtype(unittest.TestCase):
@testing.for_dtypes(
dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64],
name='result_dtype')
@testing.for_all_dtypes(name='in_dtype')
def test_argminmax_dtype(self, in_dtype, result_dtype):
a = testing.shaped_random(self.shape, cupy, in_dtype)
if self.is_module:
func = getattr(cupy, self.func)
y = func(a, dtype=result_dtype)
else:
func = getattr(a, self.func)
y = func(dtype=result_dtype)
assert y.shape == ()
assert y.dtype == result_dtype
@testing.parameterize(
{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
{'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)},
{'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)},
)
@testing.gpu
class TestWhereTwoArrays(unittest.TestCase):
@testing.for_all_dtypes_combination(
names=['cond_type', 'x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # Almost all values of a matrix made by `shaped_random` are nonzero.
        # To get a sparse condition matrix, we multiply by `m`.
cond = testing.shaped_random(self.cond_shape, xp, cond_type) * m
x = testing.shaped_random(self.x_shape, xp, x_type, seed=0)
y = testing.shaped_random(self.y_shape, xp, y_type, seed=1)
return xp.where(cond, x, y)
@testing.parameterize(
{'cond_shape': (2, 3, 4)},
{'cond_shape': (4,)},
{'cond_shape': (2, 3, 4)},
{'cond_shape': (3, 4)},
)
@testing.gpu
class TestWhereCond(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_where_cond(self, xp, dtype):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
cond = testing.shaped_random(self.cond_shape, xp, dtype) * m
return xp.where(cond)
@testing.gpu
class TestWhereError(unittest.TestCase):
def test_one_argument(self):
for xp in (numpy, cupy):
cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_)
x = testing.shaped_random((2, 3, 4), xp, xp.int32)
with pytest.raises(ValueError):
xp.where(cond, x)
@testing.parameterize(
{'array': numpy.random.randint(0, 2, (20,))},
{'array': numpy.random.randn(3, 2, 4)},
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestNonzero(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_nonzero(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.nonzero(array)
@testing.parameterize(
{'array': numpy.array(0)},
{'array': numpy.array(1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.17.0')
class TestNonzeroZeroDimension(unittest.TestCase):
@testing.for_all_dtypes()
def test_nonzero(self, dtype):
for xp in (numpy, cupy):
array = xp.array(self.array, dtype=dtype)
with pytest.raises(DeprecationWarning):
xp.nonzero(array)
@testing.parameterize(
{'array': numpy.random.randint(0, 2, (20,))},
{'array': numpy.random.randn(3, 2, 4)},
{'array': numpy.array(0)},
{'array': numpy.array(1)},
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestFlatNonzero(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_flatnonzero(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.flatnonzero(array)
@testing.parameterize(
{'array': numpy.random.randint(0, 2, (20,))},
{'array': numpy.random.randn(3, 2, 4)},
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestArgwhere(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_argwhere(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.argwhere(array)
@testing.parameterize(
{'array': cupy.array(1)},
)
@testing.gpu
class TestArgwhereZeroDimension(unittest.TestCase):
def test_argwhere(self):
with testing.assert_warns(DeprecationWarning):
return cupy.nonzero(self.array)
@testing.gpu
class TestNanArgMin(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan2(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan3(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan4(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan5(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmin_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmin_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return xp.nanargmin(a, axis=1)
@testing.gpu
class TestNanArgMax(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan2(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan3(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan4(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan5(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmax_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmax_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return xp.nanargmax(a, axis=1)
@testing.gpu
@testing.parameterize(*testing.product(
{'bins': [
[],
[0, 1, 2, 4, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[0.0, 1.0, 2.5, 4.0, 10.0],
[-1.0, 1.0, 2.5, 4.0, 20.0],
[1.5, 2.5, 4.0, 6.0],
[float('-inf'), 1.5, 2.5, 4.0, 6.0],
[1.5, 2.5, 4.0, 6.0, float('inf')],
[float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')],
[0.0, 1.0, 1.0, 4.0, 4.0, 10.0],
[0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0],
],
'side': ['left', 'right'],
'shape': [(), (10,), (6, 3, 3)]})
)
class TestSearchSorted(unittest.TestCase):
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_searchsorted(self, xp, dtype):
x = testing.shaped_arange(self.shape, xp, dtype)
bins = xp.array(self.bins)
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.gpu
@testing.parameterize(
{'side': 'left'},
{'side': 'right'})
class TestSearchSortedNanInf(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_searchsorted_nanbins(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
bins = xp.array([0, 1, 2, 4, 10, float('nan')])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_nan(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('nan')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_nan_last(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[-1] = float('nan')
bins = xp.array([0, 1, 2, 4, float('nan')])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_nan_last_repeat(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[-1] = float('nan')
bins = xp.array([0, 1, 2, float('nan'), float('nan')])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_all_nans(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[-1] = float('nan')
bins = xp.array([float('nan'), float('nan'), float('nan'),
float('nan'), float('nan')])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_inf(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('inf')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_minf(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('-inf')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.gpu
class TestSearchSortedInvalid(unittest.TestCase):
    # Can't test unordered bins due to numpy's undefined
    # behavior for searchsorted
def test_searchsorted_ndbins(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((10,), xp, xp.float64)
bins = xp.array([[10, 4], [2, 1], [7, 8]])
with pytest.raises(ValueError):
xp.searchsorted(bins, x)
@testing.gpu
class TestSearchSortedWithSorter(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_sorter(self, xp):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([3, 2, 1, 4, 0])
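        # `sorter` lists the indices that put `bins` into ascending order
        # ([1, 2, 4, 8, 10]); searchsorted then searches the virtually sorted bins.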
y = xp.searchsorted(bins, x, sorter=sorter)
return y,
def test_invalid_sorter(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([0])
with pytest.raises(ValueError):
xp.searchsorted(bins, x, sorter=sorter)
def test_nonint_sorter(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([], dtype=xp.float64)
with pytest.raises(TypeError):
xp.searchsorted(bins, x, sorter=sorter)
| 35.495212 | 79 | 0.625082 | [
"MIT"
] | daxiongshu/cupy | tests/cupy_tests/sorting_tests/test_search.py | 25,947 | Python |
import mock
import json
from collections import OrderedDict
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from apps.common.tests import GetResponseMixin
from apps.issue.models import Issue, IssueStatus, IssueExtValue
from apps.user_group.models import UserGroupType
from gated_launch_backend.settings_test import JIRA_API_URL, JIRA_ZC_USER
class BusinessModulesRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/issue/fixtures/tests/business_modules.json"
]
def test_list_business_modules(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='normal_user'))
url = reverse('businessmodules-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(response.data,
OrderedDict([('status', 200), ('msg', '成功'),
('data', OrderedDict([('total', 2), ('next', None), ('previous', None),
('results',
[OrderedDict([('id', 2), ('name', 'parking car'),
('level', 1), ('parent', 'parking'),
('parentId', 1), ('disabled', True)]),
OrderedDict([('id', 1), ('name', 'parking'),
('level', 0), ('parent', None),
('parentId', None),
('disabled', False)])])]))]))
class PhoneBrandsRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/issue/fixtures/tests/phone_brands.json"
]
def test_list_business_modules(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='normal_user'))
url = reverse('phonebrands-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(response.data,
OrderedDict([('status', 200), ('msg', '成功'),
('data', OrderedDict([('total', 1), ('next', None), ('previous', None),
('results', [OrderedDict([('id', 1),
('name', 'Huawei P8')])])]))]))
class RegionsRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/issue/fixtures/tests/regions.json"
]
def test_list_business_modules(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='normal_user'))
url = reverse('regions-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(response.data,
OrderedDict([('status', 200), ('msg', '成功'),
('data', OrderedDict([('total', 1), ('next', None), ('previous', None),
('results', [OrderedDict([('id', 1),
('name', 'North')])])]))]))
class IssuesRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/common/fixtures/tests/images.json",
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/app/fixtures/tests/app_types.json",
"apps/app/fixtures/tests/apps.json",
"apps/app/fixtures/tests/app_components.json",
"apps/task_manager/fixtures/tests/task_status.json",
"apps/task_manager/fixtures/tests/info_api_test_graytask.json",
"apps/task_manager/fixtures/tests/info_api_test_snapshotinnerstrategy.json",
"apps/issue/fixtures/tests/report_sources.json",
"apps/issue/fixtures/tests/issues.json",
"apps/usage/fixtures/tests/usage_eventtype.json",
"apps/usage/fixtures/tests/usage_eventtracking.json",
"apps/usage/fixtures/tests/usage_property.json"
]
def test_filter_issues_by_contain_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'creator': 'normal_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'normal_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'admin_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'admin_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'creator': 'app_owner_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'creator': 'app_owner_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'reportSource': 'weixin', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'reportSource': '四大区运营', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'reportSource': 'no_source', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_jira_id(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'jiraId': 'CC-157', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'jiraId': 'AAABBB', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'jiraId': 'AA-170', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_department(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'department': '网科集团', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '网科', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '质量管理部', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '质量', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '地产集团', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '不存在部门', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'department': '地产集团', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '地产', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '工程部', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '工', 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'department': '不存在部门', 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'priority': '一般', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'priority': '不紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 2)
def test_filter_issues_by_status_order(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='admin_user'))
# set app_owner_user as owner of app 6
url = reverse('usergroups-list')
data = {'type': UserGroupType.OWNER, 'appId': 6}
response = self.client.get(url, data, format='json')
group_id = response.data['data']['results'][0]['id']
url = reverse('usergroupmems-list', kwargs={'group_id': group_id})
data = {'account': 'app_owner_user'}
self.client.post(url, data, format='json')
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'statusNameOrder': '关闭,处理中,待处理,挂起,验证', 'appId': 6})
expect_order = ['关闭', '处理中', '待处理', '挂起', '验证']
        # deduplicate while preserving first-seen order (dict keys keep insertion order)
real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
self.assertEqual(expect_order, list(real_order))
response = self.client.get(reverse('issues-list'), {'statusNameOrder': '挂起,验证,关闭,处理中,待处理', 'appId': 6})
expect_order = ['挂起', '验证', '关闭', '处理中', '待处理']
real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
self.assertEqual(expect_order, list(real_order))
def test_filter_issues_by_score_and_createdTime_startDate_endDate(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2, 'createdTime': '2017-07-01'})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2, 'createdTime': '2017-06-29'})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 1, 'createdTime': '2017-07-01'})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'appId': 6, 'startDate': '2017-06-29', 'endDate': '2017-10-01'})
self.assertEqual(self._get_response_total(response), 7)
response = self.client.get(reverse('issues-list'), {'appId': 6, 'startDate': '2017-06-29', 'endDate': '2017-08-01'})
self.assertEqual(self._get_response_total(response), 5)
def test_filter_issues_by_multiple_score_value(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-list')
# appId 1
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
url_query = url + "?score=4&score=5&appId=1"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 2)
url_query = url + "?score=4&score=5&appId=1&score=300000000"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 2)
# appId 2
response = self.client.get(reverse('issues-list'), {'score': 4, 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'score': 5, 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
url_query = url + "?score=4&score=5&appId=2&score=300000000"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 1)
def test_create_issues_with_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['priority'], '紧急')
# no priority field
response = self.client.post(reverse('issues-list'),
{'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['priority'], '一般')
# check result in backend
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'priority': '一般', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
def test_create_issues_with_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['score'], '0')
# from weiXin: with reportSource and score field and reportSource field equal '四大区运营'
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb', 'reportSource': '四大区运营',
'score': '非常严重'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['score'], '5')
# from weiXin: with reportSource field and no score filed and reportSource field equal '四大区运营'
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb', 'reportSource': '四大区运营'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['score'], '4')
def test_create_issues_with_updated_after(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': '1987-01-01 10:13:20'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 2)
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': '2030-01-01 10:13:20'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 0)
date_time = '2017-06-29 20:25:00'
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': date_time})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 0)
# update 1 issue
issue = Issue.objects.get(pk=1)
issue.save()
# filter with same updated time again
response = self.client.get(reverse('issues-list'),
{'appId': 1, 'updatedAfter': date_time})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_response_total(response), 1)
def test_update_issues_with_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 1})
response = self.client.patch(url, {'priority': '紧急'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['priority'], '紧急')
# check result in backend
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issues-list'), {'priority': '不紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
def test_update_issues_operator_no_jira(self):
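        # Issue 2 has no linked JIRA issue, so the operator cannot be changed
        # and stays at the fixture value ('manong').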
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 2})
response = self.client.patch(url, {'operator': 'normal_user'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['operator'], 'manong')
def test_update_issues_operator_exist_jira(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 1})
response = self.client.patch(url, {'operator': 'normal_user'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['operator'], 'normal_user')
def test_issue_stats_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'creatorId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 7)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 9)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_report_source_and_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营', 'creatorId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 6)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_report_source_and_app(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营', 'appId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 1)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 1)
def test_issue_stats_report_source_and_task(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'reportSource': '四大区运营', 'taskId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['totalIssues'], 8)
self.assertEqual(response.data['data']['results']['statusStats']['closed'], 2)
def test_issue_stats_test_filter_start_end_time(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'startTime': '2017-01-01'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 10)
data = {'startTime': '2017-10-01', 'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 2)
data = {'startTime': '2017-10-01', 'appId': 6, 'endTime': '2017-10-10'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 2)
data = {'startTime': '2017-09-01', 'appId': 6, 'endTime': '2017-09-30'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
data = {'startTime': '2017-11-01', 'appId': 6, 'endTime': '2017-11-30'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
def test_issue_stats_test_valid_issues(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
response = self.client.get(url, format='json')
self.assertEqual(response.data['data']['validIssues'], 4)
data = {'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['validIssues'], 1)
data = {'endTime': '2017-09-30'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['validIssues'], 3)
def test_issue_stats_test_filter_issue_from(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issuestats')
data = {'issueFrom': 'local'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 1)
data = {'issueFrom': 'remote'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 1)
data = {'issueFrom': 'fake_one'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
data = {'issueFrom': 'local', 'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 1)
data = {'issueFrom': 'remote', 'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 1)
data = {'issueFrom': 'local', 'appId': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
data = {'issueFrom': 'remote', 'appId': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['totalIssues'], 0)
def test_create_issues_with_extended_fields(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['extFields'], {'手机型号': '华为', '广场': '通州万达'})
def test_can_not_create_issues_with_undefined_extended_fields(self):
        # Undefined extended fields must be rejected
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'场地': '通州万达', '手机型号': '华为'}},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data,
                         OrderedDict([('status', 400),
                                      ('msg', 'Not found: IssueExtField matching query does not exist.')]))
def test_can_not_create_issues_without_must_have_extended_fields(self):
        # Required extended fields must always be supplied
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': False, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达'}},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data,
                         OrderedDict([('status', 400), ('msg', "缺少以下必须扩展字段: {'手机型号'}")]))
def test_update_issues_with_extended_fields(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
issue_id = response.data['data']['id']
url = reverse('issues-detail', kwargs={'pk': issue_id})
response = self.client.patch(url, {'extFields': {'手机型号': '苹果'}},
format='json')
        # Extended fields are replaced wholesale on update
self.assertEqual(response.data['data']['extFields'], {'手机型号': '苹果'})
def test_get_issue_extended_field_value_from_model_obj(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
issue_id = response.data['data']['id']
issue_obj = Issue.objects.get(id=issue_id)
self.assertEqual('通州万达', issue_obj.get_ext_field_value('广场'))
self.assertEqual('华为', issue_obj.get_ext_field_value('手机型号'))
def test_set_issue_extended_field_value_from_model_obj(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
issue_id = response.data['data']['id']
issue_obj = Issue.objects.get(id=issue_id)
self.assertTrue(issue_obj.set_ext_field_value('广场', '瞎写的广场'))
self.assertEqual('瞎写的广场', issue_obj.get_ext_field_value('广场'))
        # Other fields are not affected
self.assertEqual('华为', issue_obj.get_ext_field_value('手机型号'))
self.assertTrue(issue_obj.set_ext_field_value('手机型号', '瞎写的手机型号'))
self.assertEqual('瞎写的手机型号', issue_obj.get_ext_field_value('手机型号'))
self.assertFalse(issue_obj.set_ext_field_value('瞎写的字段', 'aaa'))
def test_delete_issue_will_delete_extended_fields(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场'})
self.client.post(url, {'name': '手机型号', 'isOptional': True, 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
issue_id = response.data['data']['id']
url = reverse('issues-detail', kwargs={'pk': issue_id})
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)
self.assertEqual(0, IssueExtValue.objects.filter(issue_id=issue_id).count())
def test_update_issues_will_check_extended_fields(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场', 'isOptional': False})
self.client.post(url, {'name': '手机型号', 'default': 'iPhone', 'type': 'string'})
response = self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
issue_id = response.data['data']['id']
url = reverse('issues-detail', kwargs={'pk': issue_id})
response = self.client.patch(url, {'extFields': {'手机型号': '苹果'}},
format='json')
self.assertEqual(response.data['status'], status.HTTP_400_BAD_REQUEST)
def test_filter_issues_with_extended_fields(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueextfields-list', kwargs={'task_id': 1})
self.client.post(url, {'name': '广场', 'isOptional': False})
self.client.post(url, {'name': '手机型号', 'default': 'iPhone', 'type': 'string'})
self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '华为'}},
format='json')
self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '通州万达', '手机型号': '苹果'}},
format='json')
self.client.post(reverse('issues-list'),
{'priority': '紧急', 'appId': 1, 'taskId': 1,
'statusId': 1, 'title': 'aaaa', 'detail': 'bbbb',
'extFields': {'广场': '大望路万达', '手机型号': '华为'}},
format='json')
url = reverse('issues-list')
data = {'广场': '大望路万达', 'appId': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 1)
data = {'广场': '大望路万达', 'appId': 6}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 0)
data = {'广场': '通州万达', 'appId': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 2)
data = {'手机型号': '华为', 'appId': 1, 'taskId': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 2)
data = {'手机型号': '华为', '广场': '通州万达', 'appId': 1, 'taskId': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 1)
data = {'手机型号': '华为', '广场': '大望路万达'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 1)
data = {'手机型号': '苹果', '广场': '大望路万达'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 0)
data = {'广场': '大望路万达'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 1)
data = {'手机型号': '华为'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 2)
data = {'广场': '通州万达'}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 2)
# check in pagination condition
data = {'广场': '通州万达', 'pageSize': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 2)
data = {'广场': '通州万达', 'pageSize': 1, 'page': 2}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['total'], 2)
def test_issue_component(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['componentName'], '技术支持')
url = reverse('issues-detail', kwargs={'pk': 3})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['componentName'], '飞凡众测')
def test_issue_operator_no_jira_link(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 8})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['jiraId'], '')
self.assertEqual(response.data['data']['operator'], 'manong')
def test_issue_operator_exist_jira_link(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issues-detail', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['jiraId'], 'CC-157')
self.assertEqual(response.data['data']['operator'], 'mingong')
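# Module-level stubs for the JIRA helpers patched into the test cases below; they
# keep the tests from talking to a real JIRA server.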
def mocked_zc_set_jira_status(*args, **kwargs):
return '待处理', ['status changed!']
def mocked_jira_issue_is_avaliable(*args, **kwargs):
return True
# This method will be used by the mock to replace requests.post
def mocked_requests_post(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.data = json.dumps(self.json_data)
def json(self):
return self.json_data
if args[0] == JIRA_API_URL:
return MockResponse({'data': {'status': '待处理', 'jiraId': 'AA-157', 'operator': 'dalingdao'},
'status': 200}, 200)
return MockResponse(None, 404)
class IssuesJiraRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/common/fixtures/tests/images.json",
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/app/fixtures/tests/app_types.json",
"apps/app/fixtures/tests/apps.json",
"apps/task_manager/fixtures/tests/task_status.json",
"apps/task_manager/fixtures/tests/info_api_test_graytask.json",
"apps/task_manager/fixtures/tests/info_api_test_snapshotinnerstrategy.json",
"apps/issue/fixtures/tests/report_sources.json",
"apps/issue/fixtures/tests/issues.json",
]
def setUp(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
@mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
@mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
def test_update_jira_comment_with_empty_jira_info(self, mock_obj_1, mock_obj_2):
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'fail', 'comment': 'first comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'fail', 'comment': 'second comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "second comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
@mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
def test_update_jira_comment_with_no_jira(self, mock_obj):
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 2}),
{'conclusion': '验证不通过'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['status'], "处理中")
issue_with_pk_2 = Issue.objects.get(pk=2)
self.assertEqual(issue_with_pk_2.status.name, "处理中")
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 2}),
{'conclusion': '验证通过'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['status'], "关闭")
issue_with_pk_2 = Issue.objects.get(pk=2)
self.assertEqual(issue_with_pk_2.status.name, "关闭")
@mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
@mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
def test_update_jira_comment_with_jira_info_and_no_comments(self, mock_obj_1, mock_obj_2):
issue_with_pk_1 = Issue.objects.get(pk=1)
issue_with_pk_1.other = """{"phoneBrand": "华为 p8", "area": "四大区", "业务模块": "不知道写啥"}"""
issue_with_pk_1.save()
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'fail', 'comment': 'first comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'pass', 'comment': 'second comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "second comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
@mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
@mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
def test_update_jira_comment_with_jira_info_and_comments(self, mock_obj_1, mock_obj_2):
issue_with_pk_1 = Issue.objects.get(pk=1)
issue_with_pk_1.other = """{"phoneBrand": "华为 p8", "area": "四大区", "业务模块": "不知道写啥",
"comments": [{"wanxin": "app_owner_user", "email": "[email protected]",
"name": "", "info": "presetting comment", "startTime": "", "endTime": ""}]}"""
issue_with_pk_1.save()
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'fail', 'comment': 'first comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'pass', 'comment': 'second comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "second comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
@mock.patch('apps.issue.views.zc_set_jira_status', side_effect=mocked_zc_set_jira_status)
@mock.patch('apps.issue.views.jira_issue_is_avaliable', side_effect=mocked_jira_issue_is_avaliable)
def test_update_jira_status(self, mock_obj_1, mock_obj_2):
issue_with_pk_1 = Issue.objects.get(pk=1)
issue_with_pk_1.status = IssueStatus.objects.get(name='验证')
issue_with_pk_1.save()
response = self.client.post(reverse('issues-jiracomment', kwargs={'pk': 1}),
{'conclusion': 'fail', 'comment': 'first comment'})
self.assertEqual(response.data['status'], 200)
self.assertEqual(response.data['data']['comments'][0]['info'], "first comment")
self.assertEqual(response.data['data']['comments'][0]['wanxin'], "app_owner_user")
self.assertEqual(response.data['data']['changeLog'][0], "status changed!")
issue_with_pk_1.refresh_from_db()
self.assertEqual(issue_with_pk_1.status.name, '待处理')
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_create_jira(self, mock_post):
url = reverse('issuetojira')
data = {'issueId': 1}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['jiraId'], 'CC-157')
data = {'issueId': 8}
response = self.client.get(url, data, format='json')
self.assertEqual(response.data['data']['jiraId'], 'AA-157')
def test_jira_to_zc_jira_not_exist(self):
url = reverse('jiratoissue')
data = {
"issue": {
"key": "CC-15"
},
"user": {
"name": "zhongce"
}
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['issueId'], None)
self.assertEqual(response.data['data']['jiraId'], 'CC-15')
def test_jira_to_zc_user_is_zhongce(self):
url = reverse('jiratoissue')
data = {
"issue": {
"key": "CC-157"
},
"user": {
"name": JIRA_ZC_USER
}
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['issueId'], 1)
self.assertEqual(response.data['data']['jiraId'], 'CC-157')
def test_jira_to_zc_user_is_not_zhongce(self):
url = reverse('jiratoissue')
data = {
"issue": {
"key": "CC-157"
},
"user": {
"name": "zhaochunyan7"
}
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['issueId'], 1)
self.assertEqual(response.data['data']['jiraId'], 'CC-157')
def test_generate_change_log_jira_not_exist_update_priority(self):
url = reverse('issues-detail', kwargs={'pk': 8})
response = self.client.patch(url, {'priority': '紧急'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
zc_change_logs = json.loads(response.data['data']['zcChangeLogs'])
zc_change_logs[0].pop('created')
self.assertEqual(zc_change_logs, [{'wanxin': 'app_owner_user',
'items': [{'field': 'priority', 'toString': '紧急', 'fromString': '不紧急'}],
'author': ''}])
def test_generate_change_log_jira_not_exist_update_images(self):
url = reverse('issues-detail', kwargs={'pk': 8})
response = self.client.patch(url, {'images': ['aabbceadfdfdfdfdfdf']})
self.assertEqual(response.status_code, status.HTTP_200_OK)
zc_change_logs = json.loads(response.data['data']['zcChangeLogs'])
zc_change_logs[0].pop('created')
self.assertEqual(zc_change_logs, [{'wanxin': 'app_owner_user',
'items': [{'field': 'images',
'toString': "['aabbceadfdfdfdfdfdf']",
'fromString': '[]'}],
'author': ''}])
@mock.patch('requests.post', side_effect=mocked_requests_post)
def test_generate_change_log_create_jira(self, mock_post):
url = reverse('issuetojira')
data = {'issueId': 8}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['data']['jiraId'], 'AA-157')
issue = Issue.objects.get(pk=8)
zc_change_logs = json.loads(issue.zc_change_logs)
zc_change_logs[0].pop('created')
self.assertEqual(zc_change_logs, [{'author': '', 'wanxin': 'app_owner_user',
'items': [{'fromString': '', 'toString': 'AA-157', 'field': 'jira link'}]}])
class IssuesLiteRESTTestCase(APITestCase, GetResponseMixin):
fixtures = [
"apps/common/fixtures/tests/images.json",
"apps/auth/fixtures/tests/departments.json",
"apps/auth/fixtures/tests/users.json",
"apps/user_group/fixtures/tests/user_groups.json",
"apps/app/fixtures/tests/app_types.json",
"apps/app/fixtures/tests/apps.json",
"apps/task_manager/fixtures/tests/task_status.json",
"apps/task_manager/fixtures/tests/info_api_test_graytask.json",
"apps/task_manager/fixtures/tests/info_api_test_snapshotinnerstrategy.json",
"apps/issue/fixtures/tests/report_sources.json",
"apps/issue/fixtures/tests/issues.json",
]
def test_filter_issues_by_contain_creator(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issueslite-list'), {'creator': 'normal_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'creator': 'normal_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'creator': 'admin_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'creator': 'admin_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'creator': 'app_owner_user', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issueslite-list'), {'creator': 'app_owner_', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_report_source(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issueslite-list'), {'reportSource': 'weixin', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'reportSource': '四大区运营', 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'reportSource': 'no_source', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
def test_filter_issues_by_priority(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issues-list'), {'priority': '紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'priority': '一般', 'appId': 1})
self.assertEqual(self._get_response_total(response), 0)
response = self.client.get(reverse('issues-list'), {'priority': '不紧急', 'appId': 1})
self.assertEqual(self._get_response_total(response), 2)
def test_filter_issues_by_status_order(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='admin_user'))
# set app_owner_user as owner of app 6
url = reverse('usergroups-list')
data = {'type': UserGroupType.OWNER, 'appId': 6}
response = self.client.get(url, data, format='json')
group_id = response.data['data']['results'][0]['id']
url = reverse('usergroupmems-list', kwargs={'group_id': group_id})
data = {'account': 'app_owner_user'}
self.client.post(url, data, format='json')
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issueslite-list'),
{'statusNameOrder': '关闭,处理中,待处理,挂起,验证', 'appId': 6})
expect_order = ['关闭', '处理中', '待处理', '挂起', '验证']
# remove duplication
real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
self.assertEqual(expect_order, list(real_order))
response = self.client.get(reverse('issueslite-list'),
{'statusNameOrder': '挂起,验证,关闭,处理中,待处理', 'appId': 6})
expect_order = ['挂起', '验证', '关闭', '处理中', '待处理']
real_order = OrderedDict.fromkeys([item['statusName'] for item in response.data['data']['results']]).keys()
self.assertEqual(expect_order, list(real_order))
def test_created_time_order_when_filter_issues_by_status_order_created_time(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='admin_user'))
# set app_owner_user as owner of app 6
url = reverse('usergroups-list')
data = {'type': UserGroupType.OWNER, 'appId': 6}
response = self.client.get(url, data, format='json')
group_id = response.data['data']['results'][0]['id']
url = reverse('usergroupmems-list', kwargs={'group_id': group_id})
data = {'account': 'app_owner_user'}
self.client.post(url, data, format='json')
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
response = self.client.get(reverse('issueslite-list'),
{'statusNameOrder': '关闭,处理中,待处理,挂起,验证', 'appId': 6})
result = [(item['statusName'], item['createdAt']) for item in response.data['data']['results']]
self.assertEqual(result, [('关闭', '2017-06-29T18:25:11.681308'),
('处理中', '2017-06-29T18:25:11.681308'),
('待处理', '2017-06-29T18:25:11.681308'), ('待处理', '2017-10-01T18:22:11.681308'),
('待处理', '2017-10-01T18:25:11.681308'),
('挂起', '2017-06-29T18:25:11.681308'),
('验证', '2017-06-29T18:25:11.681308')])
def test_filter_issues_by_multiple_score_value(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
url = reverse('issueslite-list')
# appId 1
response = self.client.get(reverse('issueslite-list'), {'score': 5, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'score': 4, 'appId': 1})
self.assertEqual(self._get_response_total(response), 1)
url_query = url + "?score=4&score=5&appId=1"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 2)
url_query = url + "?score=4&score=5&appId=1&score=300000000"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 2)
# appId 2
response = self.client.get(reverse('issueslite-list'), {'score': 4, 'appId': 2})
self.assertEqual(self._get_response_total(response), 1)
response = self.client.get(reverse('issueslite-list'), {'score': 5, 'appId': 2})
self.assertEqual(self._get_response_total(response), 0)
url_query = url + "?score=4&score=5&appId=2&score=300000000"
response = self.client.get(url_query)
self.assertEqual(self._get_response_total(response), 1)
def test_issues_response(self):
self.client.force_authenticate(user=get_user_model().objects.get(username='app_owner_user'))
# appId 1
response = self.client.get(reverse('issueslite-list'), {'score': 5, 'appId': 1})
self.assertEqual(response.data,
OrderedDict([('status', 200),
('msg', '成功'),
('data', OrderedDict([('total', 1), ('next', None),
('previous', None),
('results',
[OrderedDict([('id', 1),
('jiraId', 'CC-157'),
('statusName', '待处理'), ('title', ''),
('createdAt', '2017-06-29T18:25:11.681308'),
('other', '{"phoneNumber":"15921372222","order":"12345678","phoneType":"P9","version":"0928gray","square":"通州万达","summary":"example全量数据","description":"example全量数据","occurrenceTime":"2017-09-01T09:01:00.000+0800","area":"ALL","phoneBrand":"华为","severity":"次要","businessType":"停车"}'), # noqa
('score', 5), ('remindKSTFlag', False),
('remindPlatFlag', False)])])]))]))
| 53.859054 | 366 | 0.604131 | [
"MIT"
] | ycheng-aa/gated_launch_backend | apps/issue/testcases/integration/tests.py | 61,964 | Python |
# encoding = utf-8
"""
//
// AzureMonitorAddonForSplunk
//
// Copyright (c) Microsoft Corporation
//
// All rights reserved.
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the ""Software""), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is furnished
// to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
"""
import sys
from timewindow import put_time_window, put_time_checkpoint
from concurrent import futures
from subs import get_subscription_segment, get_resources, get_azure_environment, \
get_access_token, get_metrics_for_resources, get_secret_from_keyvault
MASK = '********'
def create_or_update_storage_password(self, props, logger):
'''
unencrypted password in inputs.conf, encrypt it and store as storagePassword
'''
try:
locale = 'reference'
storage_passwords = self.service.storage_passwords
if props['username'] in storage_passwords:
locale = 'delete'
storage_passwords.delete(props['username'])
except Exception as e:
logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'\
.format(e, locale))
try:
locale = 'create'
self.service.storage_passwords.create(props['password'], props['username'])
except Exception as e:
logger('ERROR', 'Error at locale {1} in create_or_update_storage_password: {0}'\
.format(e, locale))
def mask_id_and_key(self, name, logger):
'''
masks the app_id and app_key in inputs.conf
'''
kind, input_name = name.split('://')
item = self.service.inputs.__getitem__((input_name, kind))
try:
new_input = {
'vaultName': item.content.vaultName,
'SPNTenantID': item.content.SPNTenantID,
'SPNApplicationId': MASK,
'SPNApplicationKey': MASK,
'SubscriptionId': item.content.SubscriptionId,
'secretName': item.content.secretName,
'secretVersion': item.content.secretVersion,
'index': item.content.index,
'interval': item.content.interval,
'sourcetype': item.content.sourcetype
}
item.update(**new_input).refresh()
except Exception as e:
logger('ERROR', 'Error caught in mask_id_and_key: {0}'.format(e))
def get_or_store_secrets(self, inputs, logger):
'''
    Either read the existing encrypted password or encrypt the clear-text password and store it
Either way, return a set of clear text credentials
'''
input_items = inputs.inputs.itervalues().next()
input_name = inputs.inputs.iterkeys().next()
credentials = {}
storage_passwords = self.service.storage_passwords
props_app_id = {}
props_app_id['username'] = 'AzureMonitorMetricsAppID-{0}'.format(input_name.replace(':','_'))
props_app_id['password'] = input_items.get("SPNApplicationId")
props_app_key = {}
props_app_key['username'] = 'AzureMonitorMetricsAppKey-{0}'.format(input_name.replace(':','_'))
props_app_key['password'] = input_items.get("SPNApplicationKey")
app_id = input_items.get("SPNApplicationId")
app_key = input_items.get("SPNApplicationKey")
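    # If inputs.conf still holds clear-text SPN credentials, encrypt and store them
    # as storage passwords and mask them in inputs.conf; if they are already masked,
    # read the clear-text values back from storage_passwords.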
if app_id is not None and app_key is not None:
try:
if ("AzureMonitorMetricsAppID" in storage_passwords) and (props_app_id['username'] not in storage_passwords):
# Create new unique storage password entry for AzureMonitorMetricsAppID based on input name
modify_storage_password(self, "AzureMonitorMetricsAppID", props_app_id['username'], logger)
if ("AzureMonitorMetricsAppKey" in storage_passwords) and (props_app_key['username'] not in storage_passwords):
# Create new unique storage password entry for AzureMonitorMetricsAppKey based on input name
modify_storage_password(self, "AzureMonitorMetricsAppKey", props_app_key['username'], logger)
if props_app_id['password'] == MASK:
app_id, app_key = get_app_id_and_key(self, props_app_id, props_app_key, logger)
else:
create_or_update_storage_password(self, props_app_id, logger)
create_or_update_storage_password(self, props_app_key, logger)
mask_id_and_key(self, input_name, logger)
except Exception as e:
logger('ERROR', 'Error caught in get_or_store_secrets: {0}'.format(e))
credentials['app_id'] = app_id
credentials['app_key'] = app_key
return credentials
def get_app_id_and_key(self, props_app_id, props_app_key, logger):
'''
get the encrypted app_id and app_key from storage_passwords
'''
storage_passwords = self.service.storage_passwords
if props_app_id['username'] not in storage_passwords:
raise KeyError('Did not find app_id {} in storage_passwords.'\
.format(props_app_id['username']))
if props_app_key['username'] not in storage_passwords:
        raise KeyError('Did not find app_key {} in storage_passwords.'\
.format(props_app_key['username']))
app_id = ''
app_key = ''
try:
app_id = storage_passwords[props_app_id['username']].clear_password
app_key = storage_passwords[props_app_key['username']].clear_password
except Exception as e:
logger('ERROR', 'Error caught in get_app_id_and_key: {0}'.format(e))
return app_id, app_key
def modify_storage_password(self, old_username, new_username, logger):
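    '''
    Copy the clear-text password stored under old_username into a new
    storage_passwords entry keyed by new_username (used to migrate the legacy
    shared entries to per-input entries).
    '''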
logger('INFO', 'Updating storage password. Old username: {0}, new username: {1}'.format(old_username, new_username))
storage_passwords = self.service.storage_passwords
try:
password = storage_passwords[old_username].clear_password
storage_passwords.create(password, new_username)
except Exception as e:
logger('ERROR', 'Error updating storage password in modify_storage_password: {0}'.format(e))
def get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict):
"""
map the resource groups to a function that gets resources
"""
resource_group_names = []
for resource_group in resource_groups:
resource_group_names.append(resource_group['name'])
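    # Fan out one get_resources call per resource group on a thread pool, then pull
    # metrics for each group as its future completes.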
with futures.ThreadPoolExecutor(max_workers=5) as executor:
rg_future = dict((executor.submit(get_resources, ew, bearer_token, sub_url, rg), rg)
for rg in resource_group_names)
for future in futures.as_completed(rg_future, None):
resource_group = rg_future[future]
if future.exception() is not None:
ew.log('ERROR', 'Resource group {0} generated an exception: {1}'
.format(resource_group, future.exception()))
else:
get_metrics_for_resources(ew, bearer_token, \
sub_url, resource_group, future.result(), input_sourcetype, checkpoint_dict)
def get_metrics_for_subscription(inputs, credentials, ew):
"""
top level function
given subscription id and credentials, get metrics for all resources with the right tags
    Splunk passes in a collection of inputs containing only one element, hence the single popitem() below
"""
metadata = inputs.metadata
input_name, input_item = inputs.inputs.popitem()
stanza = input_name.split('://')
instance_name = stanza[1]
try:
locale = "checkpoint file data"
checkpoint_dir = metadata['checkpoint_dir']
checkpoint_dict = {"checkpoint_dir":checkpoint_dir, "instance_name": instance_name}
locale = "put_time_window"
# update the time window for this iteration
put_time_window(ew, checkpoint_dict)
locale = "put_time_checkpoint"
# and update the checkpoint for next time
put_time_checkpoint(ew, checkpoint_dict)
tenant_id = input_item.get("SPNTenantID")
spn_client_id = credentials.get('app_id')
spn_client_secret = credentials.get('app_key')
subscription_id = input_item.get("SubscriptionId")
key_vault_name = input_item.get("vaultName")
secret_name = input_item.get("secretName")
secret_version = input_item.get("secretVersion")
input_sourcetype = input_item.get("sourcetype")
arm_creds = {}
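        # Two-step authentication: the SPN configured in inputs.conf only grants
        # access to Key Vault; the ARM SPN credentials are fetched from the vault
        # and then used to request the ARM bearer token below.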
if spn_client_id is not None and spn_client_secret is not None:
locale = "get_access_token for key vault SPN"
authentication_endpoint = "https://login.windows.net/"
resource = 'https://vault.azure.net'
kv_bearer_token = get_access_token(
tenant_id,
spn_client_id,
spn_client_secret,
authentication_endpoint,
resource)
locale = "get_secret_from_keyvault"
arm_creds = get_secret_from_keyvault(ew, kv_bearer_token,
key_vault_name, secret_name, secret_version)
locale = "get_access_token"
authentication_endpoint = get_azure_environment(
'Azure')['activeDirectoryEndpointUrl']
resource = get_azure_environment(
'Azure')['activeDirectoryResourceId']
bearer_token = get_access_token(
tenant_id,
arm_creds.get('spn_client_id'),
arm_creds.get('spn_client_secret'),
authentication_endpoint,
resource)
locale = "get_azure_environment"
resource_mgr_endpoint_url = get_azure_environment(
'Azure')['resourceManagerEndpointUrl']
locale = "get_subscription_segment"
sub_url = resource_mgr_endpoint_url + \
get_subscription_segment(subscription_id)
locale = "get_resources"
resource_groups = get_resources(ew, bearer_token, sub_url)
locale = "get_resources_for_rgs"
get_resources_for_rgs(ew, bearer_token, sub_url, resource_groups, input_sourcetype, checkpoint_dict)
except:
ew.log('ERROR', 'Error caught in get_metrics_for_subscription, type: {0}, value: {1}, locale = {2}'
.format(sys.exc_info()[0], sys.exc_info()[1], locale))
| 40.944444 | 123 | 0.674084 | [
"MIT"
] | sebastus/AzureMonitorAddonForSplunk | bin/azure_monitor_metrics_main.py | 11,055 | Python |
from numpy.oldnumeric.ma import *
| 11.666667 | 33 | 0.771429 | [
"Apache-2.0"
] | animesh/parliament2 | docker_version/resources/usr/lib/python2.7/dist-packages/numpy/numarray/ma.py | 50 | Python |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
from version import __version__
setup(
name='opencensus-ext-datadog',
version=__version__, # noqa
author='OpenCensus Authors',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
description='OpenCensus Datadog exporter',
include_package_data=True,
install_requires=[
'bitarray >= 1.0.1, < 2.0.0',
'opencensus >= 0.9.dev0, < 1.0.0',
'requests >= 2.19.0',
],
extras_require={},
license='Apache-2.0',
packages=find_packages(exclude=(
'examples',
'tests',
)),
namespace_packages=[],
url='https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-datadog', # noqa: E501
zip_safe=False,
)
| 35.912281 | 127 | 0.655105 | [
"Apache-2.0"
] | bhumikapahariapuresoftware/opencensus-python | contrib/opencensus-ext-datadog/setup.py | 2,047 | Python |
from itertools import tee
import numpy as np
import scipy.interpolate as intp
from scipy.signal import savgol_filter
def get_edge_bin(array):
"""Detect the edge indcies of a binary 1-D array.
Args:
array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary
(0/1) or boolean (True/False) values.
Returns:
list: A list containing starting and ending indices of the non-zero
blocks.
Examples:
.. code-block:: python
>>> a = [0,1,1,0,0,0,1,0,1]
>>> get_edge_bin(a)
[(1, 3), (6, 7), (8, 9)]
>>> b = [True, False, True, True, False, False]
>>> get_edge_bin(b)
[(0, 1), (2, 4)]
"""
array1 = np.int64(array)
array1 = np.insert(array1, 0, 0)
array1 = np.append(array1, 0)
tmp = array1 - np.roll(array1, 1)
i1_lst = np.nonzero(tmp == 1)[0] - 1
i2_lst = np.nonzero(tmp ==-1)[0] - 1
return list(zip(i1_lst, i2_lst))
def get_local_minima(x, window=None):
"""Get the local minima of a 1d array in a window.
Args:
x (:class:`numpy.ndarray`): A list or Numpy 1d array.
window (*int* or :class:`numpy.ndarray`): An odd integer or a list of
            odd integers giving the lengths of the search windows.
Returns:
tuple: A tuple containing:
* **index** (:class:`numpy.ndarray`): A numpy 1d array containing
indices of all local minima.
* **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing
values of all local minima.
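    Examples:
        .. code-block:: python
            >>> x = [3, 1, 2, 0, 5]
            >>> get_local_minima(x)
            (array([1, 3]), array([1, 0]))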
"""
x = np.array(x)
dif = np.diff(x)
ind = dif > 0
tmp = np.logical_xor(ind, np.roll(ind,1))
idx = np.logical_and(tmp,ind)
index = np.where(idx)[0]
if window is None:
# window is not given
return index, x[index]
else:
# window is given
if isinstance(window, int):
# window is an integer
window = np.repeat(window, len(x))
elif isinstance(window, np.ndarray):
# window is a numpy array
#if np.issubdtype(window.dtype, int):
if window.dtype.type in [np.int16, np.int32, np.int64]:
pass
else:
                # window values are not integers
                print('window array values are not integers')
raise ValueError
else:
raise ValueError
if 0 in window%2:
# not all of the windows are odd
raise ValueError
halfwin_lst = (window-1)//2
index_lst = []
for i in index:
halfwin = halfwin_lst[i]
i1 = max(0, i-halfwin)
i2 = min(i+halfwin+1, len(x))
if i == x[i1:i2].argmin() + i1:
index_lst.append(i)
if len(index_lst)>0:
index_lst = np.array(index_lst)
return index_lst, x[index_lst]
else:
return np.array([]), np.array([])
def implete_none(lst):
"""Replace the None elemnets at the beginning and the end of list by auto
increment integers.
Convert the first and last few `None` elements to auto increment integers.
These integers are determined by the first and last integers in the input
array.
While the `None` elements between two integers in the input list will
remain.
Args:
lst (list): A list contaning None values.
Returns:
newlst (list): A list containing auto increment integers.
Examples:
.. code-block:: python
>>> a = [None,None,3,4,None,5,6,None,None]
>>> implete_none(a)
[1, 2, 3, 4, None, 5, 6, 7, 8]
"""
# filter the None values
notnone_lst = [v for v in lst if v is not None]
for i, v in enumerate(lst):
if v == notnone_lst[0]:
# first not-None element and its index
notnone1 = i
value1 = v
if v == notnone_lst[-1]:
# last not-None element and its index
notnone2 = i
value2 = v
newlst = []
for i,v in enumerate(lst):
if i < notnone1:
newlst.append(value1-(notnone1-i))
elif i > notnone2:
newlst.append(value2+(i-notnone2))
else:
newlst.append(v)
return newlst
def derivative(*args, **kwargs):
"""Get the first derivative of data arrays (*x*, *y*).
If **y** is not given, the first argument will be taken as **y**, and the
differential of the input array will be returned.
Args:
x (list or :class:`numpy.ndarray`): X-values of the input array (optional).
y (list or :class:`numpy.ndarray`): Y-values of the input array.
points (int): Number of points used to calculate derivative
(optional, default is 3).
Returns:
:class:`numpy.ndarray`: Derivative of the input array.
"""
if len(args) == 1:
y = np.array(args[0], dtype=np.float64)
x = np.arange(y.size)
elif len(args) == 2:
x = np.array(args[0], dtype=np.float64)
y = np.array(args[1], dtype=np.float64)
else:
raise ValueError
npts = x.size
points = kwargs.pop('points', 3)
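    # 3-point stencil: central differences for interior points, with one-sided
    # second-order formulas (coefficients -3, 4, -1) at the two endpoints.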
if points == 3:
der = (np.roll(y,-1) - np.roll(y,1))/(np.roll(x,-1) - np.roll(x,1))
a = np.array([-3., 4., -1.])
der[0] = (a*y[0:3]).sum() / (a*x[0:3]).sum()
der[-1] = (-a[::-1]*y[-3:]).sum() / (-a[::-1]*x[-3:]).sum()
return der
else:
raise ValueError
def pairwise(array):
"""Return pairwises of an iterable arrary.
Args:
array (list or :class:`numpy.ndarray`): The input iterable array.
Returns:
:class:`zip`: zip objects.
"""
a, b = tee(array)
next(b, None)
return zip(a, b)
def smooth(array, points, deg):
"""Smooth an array.
Args:
array (:class:`numpy.ndarray`): Input array.
points (int): Points of smoothing.
deg (int): Degree of smoothing.
Returns:
:class:`numpy.ndarray`: smoothed array
"""
n = array.size
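    # The weights below appear to be 5-point least-squares (Savitzky-Golay style)
    # smoothing coefficients: w_0 is applied to interior points, w_1 and w_2 to the
    # two points nearest each boundary (mirrored at the far end).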
if points == 5:
if deg == 2:
w_2 = np.array([31., 9., -3., -5., 3.])/35.
w_1 = np.array([ 9., 13., 12., 6., -5.])/35.
w_0 = np.array([-3., 12., 17., 12., -3.])/35.
elif deg == 3:
w_2 = np.array([69., 4., -6., 4., -1.])/70.
w_1 = np.array([ 2., 27., 12., -8., 2.])/35.
w_0 = np.array([-3., 12., 17., 12., -3.])/35.
a = np.zeros((n, n))
a[0, 0:5] = w_2
a[1, 0:5] = w_1
for i in np.arange(2, n-2):
a[i, i-2:i+3] = w_0
a[-2, -5:] = w_1[::-1]
a[-1, -5:] = w_2[::-1]
result = np.matrix(a)*np.matrix(array.reshape(-1,1))
return np.array(result)[:,0]
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10,
upper_clip=None, lower_clip=None):
"""Smooth the input array with Savitzky-Golay filter with lower and/or
upper clippings.
Args:
y (:class:`numpy.ndarray`): Input array.
winlen (int): Window length of Savitzky-Golay filter.
        order (int): Order of Savitzky-Golay filter.
maxiter (int): Maximum number of iterations.
lower_clip (float): Lower sigma-clipping value.
upper_clip (float): Upper sigma-clipping value.
Returns:
tuple: A tuple containing:
* **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.
* **yres** (:class:`numpy.ndarray`) – Residuals of y values.
* **mask** (:class:`numpy.ndarray`) – Mask of y values.
* **std** (float) – Standard deviation.
"""
x = np.arange(y.size)
mask = np.ones_like(y, dtype=np.bool)
for ite in range(maxiter):
# fill masked values in y using interpolation
f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3)
ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order)
yres = y - ysmooth
std = yres[mask].std()
# generate new mask
# make a copy of existing mask
new_mask = mask * np.ones_like(mask, dtype=np.bool)
# give new mask with lower and upper clipping value
if lower_clip is not None:
new_mask *= (yres > -lower_clip * std)
if upper_clip is not None:
new_mask *= (yres < upper_clip * std)
if new_mask.sum() == mask.sum():
break
mask = new_mask
return ysmooth, yres, mask, std
| 31.735075 | 83 | 0.544033 | [
"Apache-2.0"
] | wangleon/gamse | gamse/utils/onedarray.py | 8,513 | Python |
#!/usr/bin/env python
import argparse
import bs4
import os
import re
# better indentation hack via https://stackoverflow.com/a/15513483/127114
orig_prettify = bs4.BeautifulSoup.prettify
r = re.compile(r'^(\s*)', re.MULTILINE)
def prettify(self, encoding=None, formatter="minimal", indent_width=3):
return r.sub(r'\1' * indent_width, orig_prettify(self, encoding, formatter))
bs4.BeautifulSoup.prettify = prettify
def process_file(filepath):
"""
Rewrite links in `filepath` as follows: /some/path/index.html --> /some/path/
"""
# print('processing', filepath)
if filepath.endswith('.html'):
# 1. read
with open(filepath, 'r') as htmlfile:
page = bs4.BeautifulSoup(htmlfile.read(), 'html.parser')
# 2. rewrite links
links = page.find_all('a')
for link in links:
href = link['href']
if href.endswith('index.html'):
href = href.replace('index.html', '')
link['href'] = href
# 3. hack to rewrite subtitle links that wget doesn't handle correctly
video = page.find('video')
if video:
source = video.find('source')
main_file = source['src']
tracks = video.find_all('track')
if tracks:
for track in tracks:
# track_src = track['src']
# new_src = os.path.basename(track_src)
new_src = main_file.replace('.mp4', '.vtt')
track['src'] = new_src
# 4. write
with open(filepath, 'w') as htmlfile:
html = page.prettify()
htmlfile.write(html)
def deindexify(webroot):
"""
    Walks the directory structure starting at `webroot` and rewrites all folder links.
"""
content_folders = list(os.walk(webroot))
for rel_path, _subfolders, filenames in content_folders:
# print('processing folder ' + str(rel_path))
for filename in filenames:
filepath = os.path.join(rel_path, filename)
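            # Rename subtitle tracks so their basename matches the video file,
            # mirroring the <track> src rewrite done in process_file().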
if filepath.endswith('_Subtitle.vtt'):
video_matching_filepath = filepath.replace('_Subtitle.vtt', '_Low_Resolution.vtt')
os.rename(filepath, video_matching_filepath)
else:
process_file(filepath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("webroot", help="Directory where website is stored.")
args = parser.parse_args()
deindexify(args.webroot)
print('Removing index.html from folder links done.')
| 33.115385 | 98 | 0.603175 | [
"MIT"
] | learningequality/channel2site | scripts/deindexify.py | 2,583 | Python |
"""
Based on REST Framework Parsers, optimized for csv
Parsers are used to parse the content of incoming HTTP requests.
They give us a generic way of being able to handle various media types
on the request, such as form content or json encoded data.
"""
import codecs
from urllib import parse
from django.conf import settings
from django.core.files.uploadhandler import StopFutureHandlers
from django.http import QueryDict
from django.http.multipartparser import ChunkIter
from django.http.multipartparser import \
MultiPartParser as DjangoMultiPartParser
from django.http.multipartparser import MultiPartParserError, parse_header
from django.utils.encoding import force_str
from rest_framework import renderers
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.utils import json
class DataAndFiles:
def __init__(self, data, files):
self.data = data
self.files = files
class BaseParser:
"""
All parsers should extend `BaseParser`, specifying a `media_type`
attribute, and overriding the `.parse()` method.
"""
media_type = None
def parse(self, stream, media_type=None, parser_context=None):
"""
Given a stream to read from, return the parsed representation.
Should return parsed data, or a `DataAndFiles` object consisting of the
parsed data and files.
"""
raise NotImplementedError(".parse() must be overridden.")
class MParser(BaseParser):
"""
Parser for file upload data.
"""
media_type = '*/*'
errors = {
'unhandled': 'FileUpload parse error - none of upload handlers can handle the stream',
'no_filename': 'Missing filename. Request should include a Content-Disposition header with a filename parameter.',
}
def parse(self, stream, media_type=None, parser_context=None):
"""
Treats the incoming bytestream as a raw file upload and returns
a `DataAndFiles` object.
`.data` will be None (we expect request body to be a file content).
`.files` will be a `QueryDict` containing one 'file' element.
"""
parser_context = parser_context or {}
request = parser_context['request']
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
meta = request.META
upload_handlers = request.upload_handlers
filename = self.get_filename(stream, media_type, parser_context)
# Note that this code is extracted from Django's handling of
# file uploads in MultiPartParser.
content_type = meta.get('HTTP_CONTENT_TYPE',
meta.get('CONTENT_TYPE', ''))
try:
content_length = int(meta.get('HTTP_CONTENT_LENGTH',
meta.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = None
# See if the handler will want to take care of the parsing.
for handler in upload_handlers:
result = handler.handle_raw_input(stream,
meta,
content_length,
None,
encoding)
if result is not None:
return DataAndFiles({}, {'file': result[1]})
# This is the standard case.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
chunks = ChunkIter(stream, chunk_size)
counters = [0] * len(upload_handlers)
for index, handler in enumerate(upload_handlers):
try:
handler.new_file(None, filename, content_type,
content_length, encoding)
except StopFutureHandlers:
upload_handlers = upload_handlers[:index + 1]
break
for chunk in chunks:
for index, handler in enumerate(upload_handlers):
"""
Trimming HttpResponse encapsulation from parsed file stream
"""
chunk_length = len(chunk)
start = chunk.find(bytes('\n\r\n','utf-8')) + 3
end = chunk.rfind(bytes('\r\n','utf-8'))
end = chunk[:end].rfind(bytes('\r\n','utf-8')) + 2
chunk = handler.receive_data_chunk(chunk[start:end], counters[index])
counters[index] += chunk_length
if chunk is None:
break
for index, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[index])
if file_obj is not None:
return DataAndFiles({}, {'file': file_obj})
raise ParseError(self.errors['unhandled'])
def get_filename(self, stream, media_type, parser_context):
"""
Detects the uploaded file name. First searches a 'filename' url kwarg.
Then tries to parse Content-Disposition header.
"""
try:
return parser_context['kwargs']['filename']
except KeyError:
pass
try:
meta = parser_context['request'].META
disposition = parse_header(meta['HTTP_CONTENT_DISPOSITION'].encode())
filename_parm = disposition[1]
if 'filename*' in filename_parm:
return self.get_encoded_filename(filename_parm)
return force_str(filename_parm['filename'])
except (AttributeError, KeyError, ValueError):
pass
def get_encoded_filename(self, filename_parm):
"""
Handle encoded filenames per RFC6266. See also:
https://tools.ietf.org/html/rfc2231#section-4
"""
encoded_filename = force_str(filename_parm['filename*'])
try:
charset, lang, filename = encoded_filename.split('\'', 2)
filename = parse.unquote(filename)
except (ValueError, LookupError):
filename = force_str(filename_parm['filename'])
return filename
| 38.64375 | 122 | 0.613456 | [
"MIT"
] | marco-aziz/mPulse | mparser.py | 6,183 | Python |
class CurveByPoints(CurveElement,IDisposable):
""" A curve interpolating two or more points. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetPoints(self):
"""
GetPoints(self: CurveByPoints) -> ReferencePointArray
Get the sequence of points interpolated by this curve.
"""
pass
def GetVisibility(self):
"""
GetVisibility(self: CurveByPoints) -> FamilyElementVisibility
Gets the visibility.
Returns: A copy of visibility settings for the curve.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetPoints(self,points):
"""
SetPoints(self: CurveByPoints,points: ReferencePointArray)
Change the sequence of points interpolated by this curve.
points: An array of 2 or more ReferencePoints.
"""
pass
def SetVisibility(self,visibility):
"""
SetVisibility(self: CurveByPoints,visibility: FamilyElementVisibility)
Sets the visibility.
"""
pass
@staticmethod
def SortPoints(arr):
"""
SortPoints(arr: ReferencePointArray) -> bool
        Order a set of ReferencePoints in the same way Revit does when
        creating a curve from points.
        arr: An array of ReferencePoints. The array is reordered if
        sortPoints returns true, and is unchanged if sortPoints returns
        false.
        Returns: False if the least-squares method is unable to find a
        solution; true otherwise.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsReferenceLine=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IsReferenceLine(self: CurveByPoints) -> bool
Set: IsReferenceLine(self: CurveByPoints)=value
"""
ReferenceType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Indicates the type of reference.
Get: ReferenceType(self: CurveByPoints) -> ReferenceType
Set: ReferenceType(self: CurveByPoints)=value
"""
SketchPlane=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Override the SketchPlane property of CurveElement.
Get: SketchPlane(self: CurveByPoints) -> SketchPlane
Set: SketchPlane(self: CurveByPoints)=value
"""
Subcategory=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The subcategory,or graphics style,of the CurveByPoints.
Get: Subcategory(self: CurveByPoints) -> GraphicsStyle
Set: Subcategory(self: CurveByPoints)=value
"""
Visible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the point is visible when the family is loaded
into a project.
Get: Visible(self: CurveByPoints) -> bool
Set: Visible(self: CurveByPoints)=value
"""
| 22.341772 | 215 | 0.689235 | [
"MIT"
] | BCSharp/ironpython-stubs | release/stubs.min/Autodesk/Revit/DB/__init___parts/CurveByPoints.py | 3,530 | Python |
# -*- coding: UTF-8 -*-
"""Define transaction calendar"""
import calendar
import datetime
from collections import defaultdict
from utils import Exchange
class TransPeriod(object):
"""
The period of exchange transaction time, e.g. start_time, end_time of a day.
"""
def __init__(self, start_time, end_time):
self._start_time = None
self._end_time = None
if end_time > start_time:
self._start_time = start_time
self._end_time = end_time
else:
            raise ValueError('end_time must be later than start_time')
@property
def start_time(self):
return self._start_time
@property
def end_time(self):
return self._end_time
def time_delta(self):
h = self._end_time.hour - self._start_time.hour
m = self._end_time.minute - self._start_time.minute
s = m * 60 + self._end_time.second - self._start_time.second
return datetime.timedelta(hours=h, seconds=s)
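# Illustrative use of TransPeriod (times below are assumptions, not from this
# module):
#   morning = TransPeriod(datetime.time(9, 30), datetime.time(11, 30))
#   str(morning.time_delta())  ->  '2:00:00'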
class TransCalendar(calendar.Calendar):
"""
The Exchange Transaction Calendar.
Constructor parameters:
    day_periods: list of TransPeriod instances, each giving a session's start_time and end_time
first_week_day: the first day of a week, e.g. calendar.SUNDAY
"""
SH_2017 = {2017: [(2017, 1, 1), (2017, 1, 2), (2017, 1, 27), (2017, 1, 28),
(2017, 1, 29), (2017, 1, 30), (2017, 1, 31), (2017, 2, 1),
(2017, 2, 2), (2017, 4, 2), (2017, 4, 3), (2017, 4, 4),
(2017, 5, 1), (2017, 5, 28), (2017, 5, 29), (2017, 5, 30),
(2017, 10, 1), (2017, 10, 2), (2017, 10, 3), (2017, 10, 4),
(2017, 10, 5), (2017, 10, 6), (2017, 10, 7), (2017, 10, 8)]}
Holidays_2017 = {Exchange.SH: SH_2017, Exchange.SZ: SH_2017}
def __init__(self, ex, day_periods, first_week_day=calendar.SUNDAY):
super(TransCalendar, self).__init__(firstweekday=first_week_day)
self._exchange = ex
self._day_periods = day_periods
self._holidays = defaultdict(list)
self.set_holiday(TransCalendar.Holidays_2017[self._exchange])
def set_holiday(self, holidays):
for year, holiday_list in holidays.items():
self._holidays[year] = [datetime.date(*holiday) for holiday in holiday_list]
def is_trans_day(self, dt):
if ((dt.date().weekday() == calendar.SATURDAY) or
(dt.date().weekday() == calendar.SUNDAY) or
(dt.date() in self._holidays[dt.year])):
return False
else:
return True
def is_trans_time(self, dt):
dt_time = dt.time()
for transPeriod in self._day_periods:
if (dt_time >= transPeriod.start_time) and (dt_time <= transPeriod.end_time):
return True
return False
@staticmethod
def next_trans_day(dt):
return dt
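# Illustrative construction of a Shanghai-exchange calendar (session times are
# assumptions, not taken from this module):
#   periods = [TransPeriod(datetime.time(9, 30), datetime.time(11, 30)),
#              TransPeriod(datetime.time(13, 0), datetime.time(15, 0))]
#   cal = TransCalendar(Exchange.SH, periods)
#   cal.is_trans_day(datetime.datetime(2017, 10, 2))  ->  False (2017 holiday)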
| 34.914634 | 89 | 0.595878 | [
"MIT"
] | xuesj/QuoteAdapter | receivers/Calender.py | 2,863 | Python |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imagenet val. annotated by ReaL labels (https://arxiv.org/abs/2006.07159)."""
import json
import os
import tarfile
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = '''\
This dataset contains ILSVRC-2012 (ImageNet) validation images augmented with a
new set of "Re-Assessed" (ReaL) labels from the "Are we done with ImageNet"
paper, see https://arxiv.org/abs/2006.07159. These labels are collected using
the enhanced protocol, resulting in multi-label and more accurate annotations.
Important note: about 3500 examples contain no label, these should be [excluded
from the averaging when computing the accuracy](https://github.com/google-research/reassessed-imagenet#numpy).
One possible way of doing this is with the following NumPy code:
```python
is_correct = [pred in real_labels[i] for i, pred in enumerate(predictions) if real_labels[i]]
real_accuracy = np.mean(is_correct)
```
'''
_CITATION = '''\
@article{beyer2020imagenet,
title={Are we done with ImageNet?},
author={Lucas Beyer and Olivier J. Henaff and Alexander Kolesnikov and Xiaohua Zhai and Aaron van den Oord},
  journal={arXiv preprint arXiv:2006.07159},
year={2020}
}
@article{ILSVRC15,
Author={Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title={{ImageNet Large Scale Visual Recognition Challenge}},
Year={2015},
journal={International Journal of Computer Vision (IJCV)},
doi={10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}
}
'''
_VALIDATION_LABELS_FNAME = 'image_classification/imagenet2012_validation_labels.txt'
_LABELS_FNAME = 'image_classification/imagenet2012_labels.txt'
_REAL_LABELS_URL = 'https://raw.githubusercontent.com/google-research/reassessed-imagenet/master/real.json'
class Imagenet2012Real(tfds.core.GeneratorBasedBuilder):
"""ImageNet validation images with ReaL labels."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release',
}
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
manual_dir should contain `ILSVRC2012_img_val.tar` file.
You need to register on http://www.image-net.org/download-images in order
to get the link to download the dataset.
"""
def _info(self):
names_file = tfds.core.tfds_path(_LABELS_FNAME)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(encoding_format='jpeg'),
'original_label': tfds.features.ClassLabel(names_file=names_file),
'real_label': tfds.features.Sequence(
tfds.features.ClassLabel(names_file=names_file)),
'file_name': tfds.features.Text(),
}),
supervised_keys=('image', 'real_label'),
homepage='https://github.com/google-research/reassessed-imagenet',
citation=_CITATION,
)
def _get_real_labels(self, dl_manager):
with tf.io.gfile.GFile(dl_manager.download(_REAL_LABELS_URL), 'r') as f:
# ReaL labels are ordered in the lexicographical order.
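      # Illustrative shape of the result (label ids here are made up):
      #   {'ILSVRC2012_val_00000001.JPEG': [65], 'ILSVRC2012_val_00000002.JPEG': [970, 795], ...}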
return {'ILSVRC2012_val_{:08}.JPEG'.format(i + 1): labels
for i, labels in enumerate(json.load(f))}
@staticmethod
def _get_original_labels(val_path):
"""Returns labels for validation.
Args:
val_path: path to TAR file containing validation images. It is used to
retrieve the name of pictures and associate them to labels.
Returns:
dict, mapping from image name (str) to label (str).
"""
labels_path = os.fspath(tfds.core.tfds_path(_VALIDATION_LABELS_FNAME))
with tf.io.gfile.GFile(labels_path) as labels_f:
# `splitlines` to remove trailing `\r` in Windows
labels = labels_f.read().strip().splitlines()
with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:
tar = tarfile.open(mode='r:', fileobj=tar_f_obj)
images = sorted(tar.getnames())
return dict(zip(images, labels))
def _split_generators(self, dl_manager):
val_path = os.path.join(dl_manager.manual_dir, 'ILSVRC2012_img_val.tar')
if not tf.io.gfile.exists(val_path):
raise AssertionError(
'ImageNet requires manual download of the data. Please download '
'the train and val set and place them into: {}'.format(val_path))
return [
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'archive': dl_manager.iter_archive(val_path),
'original_labels': self._get_original_labels(val_path),
'real_labels': self._get_real_labels(dl_manager),
},
),
]
def _generate_examples(self, archive, original_labels, real_labels):
for fname, fobj in archive:
record = {
'file_name': fname,
'image': fobj,
'original_label': original_labels[fname],
'real_label': real_labels[fname],
}
yield fname, record
| 37.986667 | 220 | 0.707617 | [
"Apache-2.0"
] | Abduttayyeb/datasets | tensorflow_datasets/image_classification/imagenet2012_real.py | 5,698 | Python |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Dan Wendlandt, Nicira, Inc.
import mox
from quantum.agent.linux import ovs_lib, utils
from quantum.openstack.common import uuidutils
from quantum.tests import base
class OVS_Lib_Test(base.BaseTestCase):
"""
A test suite to excercise the OVS libraries shared by Quantum agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=2"
self.mox = mox.Mox()
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.mox.StubOutWithMock(utils, "execute")
self.addCleanup(self.mox.UnsetStubs)
def test_vifport(self):
"""create and stringify vif port, confirm no exceptions"""
self.mox.ReplayAll()
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
foo = str(port)
self.mox.VerifyAll()
def test_reset_bridge(self):
utils.execute(["ovs-vsctl", self.TO, "--",
"--if-exists", "del-br", self.BR_NAME],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME],
root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.reset_bridge()
self.mox.VerifyAll()
def test_delete_port(self):
pname = "tap5"
utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.delete_port(pname)
self.mox.VerifyAll()
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,in_port=%s,dl_vlan=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (ofport, vid, lsw_id)],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef",
actions="strip_vlan,output:0")
self.br.add_flow(priority=1, actions="normal")
self.br.add_flow(priority=2, actions="drop")
self.br.add_flow(priority=2, in_port=ofport, actions="drop")
self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid,
actions="strip_vlan,set_tunnel:%s,normal" %
(lsw_id))
self.br.add_flow(priority=3, tun_id=lsw_id,
actions="mod_vlan_vid:%s,output:%s" %
(vid, ofport))
self.mox.VerifyAll()
def test_get_port_ofport(self):
pname = "tap99"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.get_port_ofport(pname), ofport)
self.mox.VerifyAll()
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
utils.execute(["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper).AndReturn(datapath_id)
self.mox.ReplayAll()
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.mox.VerifyAll()
def test_count_flows(self):
utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper).AndReturn('ignore'
'\nflow-1\n')
self.mox.ReplayAll()
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.mox.VerifyAll()
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport], root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id], root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
self.mox.VerifyAll()
def test_add_tunnel_port(self):
pname = "tap99"
ip = "9.9.9.9"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "add-port",
self.BR_NAME, pname], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=gre"], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:remote_ip=" + ip],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:in_key=flow"],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:out_key=flow"],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.add_tunnel_port(pname, ip), ofport)
self.mox.VerifyAll()
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "add-port",
self.BR_NAME, pname], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=patch"], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set",
"Interface", pname, "options:peer=" + peer],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
self.mox.VerifyAll()
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper).AndReturn("%s\n" % pname)
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper).AndReturn(external_ids)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
if is_xen:
utils.execute(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper).AndReturn(vif_id)
self.mox.ReplayAll()
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
self.mox.VerifyAll()
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(True)
def test_clear_db_attribute(self):
pname = "tap77"
utils.execute(["ovs-vsctl", self.TO, "clear", "Port",
pname, "tag"], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.clear_db_attribute("Port", pname, "tag")
self.mox.VerifyAll()
def test_port_id_regex(self):
result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",'
' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",'
' iface-status=active}\nname :'
' "dhc5c1321a7-c7"\nofport : 2\n')
match = self.br.re_id.search(result)
vif_mac = match.group('vif_mac')
vif_id = match.group('vif_id')
port_name = match.group('port_name')
ofport = int(match.group('ofport'))
self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2')
self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f')
self.assertEqual(port_name, 'dhc5c1321a7-c7')
self.assertEqual(ofport, 2)
def test_iface_to_br(self):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper).AndReturn('br-int')
self.mox.ReplayAll()
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.mox.VerifyAll()
    def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper).AndRaise(Exception)
self.mox.ReplayAll()
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.mox.VerifyAll()
def test_delete_all_ports(self):
self.mox.StubOutWithMock(self.br, 'get_port_name_list')
self.br.get_port_name_list().AndReturn(['port1'])
self.mox.StubOutWithMock(self.br, 'delete_port')
self.br.delete_port('port1')
self.mox.ReplayAll()
self.br.delete_ports(all_ports=True)
self.mox.VerifyAll()
def test_delete_quantum_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
ports = [port1, port2]
self.mox.StubOutWithMock(self.br, 'get_vif_ports')
self.br.get_vif_ports().AndReturn([port1, port2])
self.mox.StubOutWithMock(self.br, 'delete_port')
self.br.delete_port('tap1234')
self.br.delete_port('tap5678')
self.mox.ReplayAll()
self.br.delete_ports(all_ports=False)
self.mox.VerifyAll()
def test_get_bridges(self):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "list-br"],
root_helper=root_helper).AndReturn('br-int\nbr-ex\n')
self.mox.ReplayAll()
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.mox.VerifyAll()
| 40.616046 | 78 | 0.567478 | [
"Apache-2.0"
] | ericwanghp/quantum | quantum/tests/unit/openvswitch/test_ovs_lib.py | 14,175 | Python |
# Generated by Django 3.0.6 on 2020-05-28 17:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='Provide name of your service', max_length=50)),
('description', models.CharField(help_text='Provide short decription', max_length=500)),
('bootstrap_icon', models.CharField(help_text='Enter bootstrap icon here', max_length=500)),
('link', models.CharField(help_text='Provide link to an action', max_length=100)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='Enter task title', max_length=50)),
('description', models.CharField(help_text='Enter task description', max_length=500)),
('bootstrap_icon', models.CharField(help_text='Enter bootstrap icon here', max_length=500)),
                ('link', models.CharField(help_text='Path to the task configuration', max_length=100)),
],
),
migrations.CreateModel(
name='ViberToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('data', models.CharField(max_length=100)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TelegramToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('data', models.CharField(max_length=100)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('telegram_tokens', models.ManyToManyField(to='botapp.TelegramToken')),
('viber_tokens', models.ManyToManyField(to='botapp.ViberToken')),
],
),
]
| 46.878788 | 119 | 0.60181 | [
"Apache-2.0"
] | AIexBondar/my_works | projectamber/botapp/migrations/0001_initial.py | 3,115 | Python |
"""Tests for the HTTP API Client."""
import pytest
import solana.system_program as sp
from solana.rpc.api import DataSliceOpt, Client
from solana.keypair import Keypair
from solana.rpc.core import RPCException
from solana.rpc.types import RPCError
from solana.transaction import Transaction
from solana.rpc.commitment import Finalized
from spl.token.constants import WRAPPED_SOL_MINT
from .utils import AIRDROP_AMOUNT, assert_valid_response
@pytest.mark.integration
def test_request_air_drop(stubbed_sender: Keypair, test_http_client: Client):
"""Test air drop to stubbed_sender."""
resp = test_http_client.request_airdrop(stubbed_sender.public_key, AIRDROP_AMOUNT)
assert_valid_response(resp)
test_http_client.confirm_transaction(resp["result"])
balance = test_http_client.get_balance(stubbed_sender.public_key)
assert balance["result"]["value"] == AIRDROP_AMOUNT
@pytest.mark.integration
def test_request_air_drop_prefetched_blockhash(stubbed_sender_prefetched_blockhash, test_http_client):
"""Test air drop to stubbed_sender."""
resp = test_http_client.request_airdrop(stubbed_sender_prefetched_blockhash.public_key, AIRDROP_AMOUNT)
assert_valid_response(resp)
test_http_client.confirm_transaction(resp["result"])
balance = test_http_client.get_balance(stubbed_sender_prefetched_blockhash.public_key)
assert balance["result"]["value"] == AIRDROP_AMOUNT
@pytest.mark.integration
def test_request_air_drop_cached_blockhash(stubbed_sender_cached_blockhash, test_http_client):
"""Test air drop to stubbed_sender."""
resp = test_http_client.request_airdrop(stubbed_sender_cached_blockhash.public_key, AIRDROP_AMOUNT)
assert_valid_response(resp)
test_http_client.confirm_transaction(resp["result"])
assert_valid_response(resp)
balance = test_http_client.get_balance(stubbed_sender_cached_blockhash.public_key)
assert balance["result"]["value"] == AIRDROP_AMOUNT
@pytest.mark.integration
def test_send_invalid_transaction(test_http_client):
"""Test sending an invalid transaction to localnet."""
# Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver
with pytest.raises(RPCException) as exc_info:
test_http_client.send_raw_transaction(b"foo")
assert exc_info.value.args[0].keys() == RPCError.__annotations__.keys() # pylint: disable=no-member
@pytest.mark.integration
def test_send_transaction_and_get_balance(stubbed_sender, stubbed_receiver, test_http_client):
"""Test sending a transaction to localnet."""
# Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver
transfer_tx = Transaction().add(
sp.transfer(sp.TransferParams(from_pubkey=stubbed_sender.public_key, to_pubkey=stubbed_receiver, lamports=1000))
)
resp = test_http_client.send_transaction(transfer_tx, stubbed_sender)
assert_valid_response(resp)
# Confirm transaction
test_http_client.confirm_transaction(resp["result"])
# Check balances
resp = test_http_client.get_balance(stubbed_sender.public_key)
assert_valid_response(resp)
assert resp["result"]["value"] == 9999994000
resp = test_http_client.get_balance(stubbed_receiver)
assert_valid_response(resp)
assert resp["result"]["value"] == 954
@pytest.mark.integration
def test_send_transaction_prefetched_blockhash(
stubbed_sender_prefetched_blockhash, stubbed_receiver_prefetched_blockhash, test_http_client
):
"""Test sending a transaction to localnet."""
# Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver
transfer_tx = Transaction().add(
sp.transfer(
sp.TransferParams(
from_pubkey=stubbed_sender_prefetched_blockhash.public_key,
to_pubkey=stubbed_receiver_prefetched_blockhash,
lamports=1000,
)
)
)
recent_blockhash = test_http_client.parse_recent_blockhash(test_http_client.get_recent_blockhash(Finalized))
resp = test_http_client.send_transaction(
transfer_tx, stubbed_sender_prefetched_blockhash, recent_blockhash=recent_blockhash
)
assert_valid_response(resp)
# Confirm transaction
test_http_client.confirm_transaction(resp["result"])
# Check balances
resp = test_http_client.get_balance(stubbed_sender_prefetched_blockhash.public_key)
assert_valid_response(resp)
assert resp["result"]["value"] == 9999994000
resp = test_http_client.get_balance(stubbed_receiver_prefetched_blockhash)
assert_valid_response(resp)
assert resp["result"]["value"] == 954
@pytest.mark.integration
def test_send_transaction_cached_blockhash(
stubbed_sender_cached_blockhash, stubbed_receiver_cached_blockhash, test_http_client_cached_blockhash
):
"""Test sending a transaction to localnet."""
# Create transfer tx to transfer lamports from stubbed sender to stubbed_receiver
transfer_tx = Transaction().add(
sp.transfer(
sp.TransferParams(
from_pubkey=stubbed_sender_cached_blockhash.public_key,
to_pubkey=stubbed_receiver_cached_blockhash,
lamports=1000,
)
)
)
assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) == 0
assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) == 0
resp = test_http_client_cached_blockhash.send_transaction(transfer_tx, stubbed_sender_cached_blockhash)
# we could have got a new blockhash or not depending on network latency and luck
assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) in (0, 1)
assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) == 1
assert_valid_response(resp)
# Confirm transaction
test_http_client_cached_blockhash.confirm_transaction(resp["result"])
# Check balances
resp = test_http_client_cached_blockhash.get_balance(stubbed_sender_cached_blockhash.public_key)
assert_valid_response(resp)
assert resp["result"]["value"] == 9999994000
# Second transaction
transfer_tx = Transaction().add(
sp.transfer(
sp.TransferParams(
from_pubkey=stubbed_sender_cached_blockhash.public_key,
to_pubkey=stubbed_receiver_cached_blockhash,
lamports=2000,
)
)
)
resp = test_http_client_cached_blockhash.get_balance(stubbed_receiver_cached_blockhash)
assert_valid_response(resp)
assert resp["result"]["value"] == 954
resp = test_http_client_cached_blockhash.send_transaction(transfer_tx, stubbed_sender_cached_blockhash)
# we could have got a new blockhash or not depending on network latency and luck
assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) in (0, 1)
assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) in (1, 2)
assert_valid_response(resp)
# Confirm transaction
test_http_client_cached_blockhash.confirm_transaction(resp["result"])
# Check balances
resp = test_http_client_cached_blockhash.get_balance(stubbed_sender_cached_blockhash.public_key)
assert_valid_response(resp)
assert resp["result"]["value"] == 9999987000
assert len(test_http_client_cached_blockhash.blockhash_cache.unused_blockhashes) == 1
assert len(test_http_client_cached_blockhash.blockhash_cache.used_blockhashes) == 1
@pytest.mark.integration
def test_send_raw_transaction_and_get_balance(stubbed_sender, stubbed_receiver, test_http_client):
"""Test sending a raw transaction to localnet."""
# Get a recent blockhash
resp = test_http_client.get_recent_blockhash(Finalized)
assert_valid_response(resp)
recent_blockhash = resp["result"]["value"]["blockhash"]
# Create transfer tx transfer lamports from stubbed sender to stubbed_receiver
transfer_tx = Transaction(recent_blockhash=recent_blockhash).add(
sp.transfer(sp.TransferParams(from_pubkey=stubbed_sender.public_key, to_pubkey=stubbed_receiver, lamports=1000))
)
# Sign transaction
transfer_tx.sign(stubbed_sender)
# Send raw transaction
resp = test_http_client.send_raw_transaction(transfer_tx.serialize())
assert_valid_response(resp)
# Confirm transaction
test_http_client.confirm_transaction(resp["result"])
# Check balances
resp = test_http_client.get_balance(stubbed_sender.public_key)
assert_valid_response(resp)
assert resp["result"]["value"] == 9999988000
resp = test_http_client.get_balance(stubbed_receiver)
assert_valid_response(resp)
assert resp["result"]["value"] == 1954
@pytest.mark.integration
def test_confirm_bad_signature(test_http_client: Client) -> None:
"""Test that RPCException is raised when trying to confirm an invalid signature."""
with pytest.raises(RPCException) as exc_info:
test_http_client.confirm_transaction("foo")
err_object = exc_info.value.args[0]
assert err_object == {"code": -32602, "message": "Invalid param: WrongSize"}
@pytest.mark.integration
def test_get_block_commitment(test_http_client):
"""Test get block commitment."""
resp = test_http_client.get_block_commitment(5)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_block_time(test_http_client):
"""Test get block time."""
resp = test_http_client.get_block_time(5)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_cluster_nodes(test_http_client):
"""Test get cluster nodes."""
resp = test_http_client.get_cluster_nodes()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_confirmed_block(test_http_client):
"""Test get confirmed block."""
resp = test_http_client.get_confirmed_block(1)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_confirmed_block_with_encoding(test_http_client):
"""Test get confrimed block with encoding."""
resp = test_http_client.get_confirmed_block(1, encoding="base64")
assert_valid_response(resp)
@pytest.mark.integration
def test_get_block(test_http_client):
"""Test get block."""
resp = test_http_client.get_block(1)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_block_with_encoding(test_http_client):
"""Test get block with encoding."""
resp = test_http_client.get_block(1, encoding="base64")
assert_valid_response(resp)
@pytest.mark.integration
def test_get_block_height(test_http_client):
"""Test get height."""
resp = test_http_client.get_block_height()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_confirmed_blocks(test_http_client):
"""Test get confirmed blocks."""
resp = test_http_client.get_confirmed_blocks(5, 10)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_blocks(test_http_client):
"""Test get blocks."""
resp = test_http_client.get_blocks(5, 10)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_confirmed_signature_for_address2(test_http_client):
"""Test get confirmed signature for address2."""
resp = test_http_client.get_confirmed_signature_for_address2("Vote111111111111111111111111111111111111111", limit=1)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_signatures_for_address(test_http_client):
"""Test get signatures for addresses."""
resp = test_http_client.get_signatures_for_address("Vote111111111111111111111111111111111111111", limit=1)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_epoch_info(test_http_client):
"""Test get epoch info."""
resp = test_http_client.get_epoch_info()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_epoch_schedule(test_http_client):
"""Test get epoch schedule."""
resp = test_http_client.get_epoch_schedule()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_fee_calculator_for_blockhash(test_http_client):
"""Test get fee calculator for blockhash."""
resp = test_http_client.get_recent_blockhash(Finalized)
assert_valid_response(resp)
resp = test_http_client.get_fee_calculator_for_blockhash(resp["result"]["value"]["blockhash"])
assert_valid_response(resp)
@pytest.mark.integration
def test_get_slot(test_http_client):
"""Test get slot."""
resp = test_http_client.get_slot()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_fees(test_http_client):
"""Test get fees."""
resp = test_http_client.get_fees()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_first_available_block(test_http_client):
"""Test get first available block."""
resp = test_http_client.get_first_available_block()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_genesis_hash(test_http_client):
"""Test get genesis hash."""
resp = test_http_client.get_genesis_hash()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_identity(test_http_client):
"""Test get identity."""
resp = test_http_client.get_genesis_hash()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_inflation_governor(test_http_client):
"""Test get inflation governor."""
resp = test_http_client.get_inflation_governor()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_inflation_rate(test_http_client):
"""Test get inflation rate."""
resp = test_http_client.get_inflation_rate()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_largest_accounts(test_http_client):
"""Test get largest accounts."""
resp = test_http_client.get_largest_accounts()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_leader_schedule(test_http_client):
"""Test get leader schedule."""
resp = test_http_client.get_leader_schedule()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_minimum_balance_for_rent_exemption(test_http_client):
"""Test get minimum balance for rent exemption."""
resp = test_http_client.get_minimum_balance_for_rent_exemption(50)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_slot_leader(test_http_client):
"""Test get slot leader."""
resp = test_http_client.get_slot_leader()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_supply(test_http_client):
"""Test get slot leader."""
resp = test_http_client.get_supply()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_transaction_count(test_http_client):
"""Test get transactinon count."""
resp = test_http_client.get_transaction_count()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_version(test_http_client):
"""Test get version."""
resp = test_http_client.get_version()
assert_valid_response(resp)
@pytest.mark.integration
def test_get_account_info(stubbed_sender, test_http_client):
"""Test get_account_info."""
resp = test_http_client.get_account_info(stubbed_sender.public_key)
assert_valid_response(resp)
resp = test_http_client.get_account_info(stubbed_sender.public_key, encoding="jsonParsed")
assert_valid_response(resp)
resp = test_http_client.get_account_info(stubbed_sender.public_key, data_slice=DataSliceOpt(1, 1))
assert_valid_response(resp)
@pytest.mark.integration
def test_get_multiple_accounts(stubbed_sender, test_http_client):
"""Test get_multiple_accounts."""
pubkeys = [stubbed_sender.public_key] * 2
resp = test_http_client.get_multiple_accounts(pubkeys)
assert_valid_response(resp)
resp = test_http_client.get_multiple_accounts(pubkeys, encoding="jsonParsed")
assert_valid_response(resp)
resp = test_http_client.get_multiple_accounts(pubkeys, data_slice=DataSliceOpt(1, 1))
assert_valid_response(resp)
@pytest.mark.integration
def test_get_token_largest_accounts(test_http_client):
"""Test get token largest accounts."""
resp = test_http_client.get_token_largest_accounts(WRAPPED_SOL_MINT)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_token_supply(test_http_client):
"""Test get token supply."""
resp = test_http_client.get_token_supply(WRAPPED_SOL_MINT)
assert_valid_response(resp)
@pytest.mark.integration
def test_get_vote_accounts(test_http_client):
"""Test get vote accounts."""
resp = test_http_client.get_vote_accounts()
assert_valid_response(resp)
| 37.042506 | 120 | 0.773765 | [
"MIT"
] | 01protocol/solana-py | tests/integration/test_http_client.py | 16,558 | Python |
#!/usr/bin/python
__title__ = 'centinel'
__version__ = '0.1.5.7.1'
import centinel.backend
import centinel.client
import centinel.command
import centinel.config
import centinel.cli
import centinel.daemonize
import centinel.utils
| 19.166667 | 25 | 0.808696 | [
"MIT"
] | rpanah/centinel | centinel/__init__.py | 230 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The BitCore Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This module contains utilities for doing coverage analysis on the RPC
interface.
It provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__getattr__(
*args, **kwargs)
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
return return_val
@property
def url(self):
return self.auth_service_proxy_instance.url
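# Illustrative wiring (the raw_proxy/logfile names are assumptions): record every
# RPC call a test makes through a node's proxy.
#   logfile = get_filename("/tmp/coverage", 0)
#   node = AuthServiceProxyWrapper(raw_proxy, coverage_logfile=logfile)
#   node.getblockcount()   # appends "getblockcount" to the logfile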
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `bitcore-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
| 27.71028 | 79 | 0.660708 | [
"MIT"
] | Goosey13/bitcore-limtex-broteq | qa/rpc-tests/test_framework/coverage.py | 2,965 | Python |
#!/usr/bin/env python
# coding: utf-8
# This software component is licensed by ST under BSD 3-Clause license,
# the "License"; You may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
# https://opensource.org/licenses/BSD-3-Clause
"""KWS Feature Extraction example."""
import numpy as np
import librosa
import scipy.io.wavfile  # scipy.io.wavfile.read is used below
from scipy.signal import hann
from scipy.fftpack import dct
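# Pipeline implemented below for one 2048-sample frame:
#   asymmetric Hann window -> FFT -> power spectrum -> 128-band mel filterbank
#   -> 10*log10 energies -> DCT (type III) -> keep coefficients 1..13.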
def mfcc_col(buff_test):
window = 2048
half_window = int(window / 2)
n_mels = 128
n_coeff = 13
assert buff_test.shape == (window,)
hann_asym_f32 = hann(window, sym=False).astype('float32')
assert hann_asym_f32.shape == (window,), hann_asym_f32.shape
buff_hann = buff_test * hann_asym_f32
assert buff_hann.shape == (window,), buff_hann.shape
fft = np.fft.fft(buff_hann, window)[:half_window + 1]
assert fft.shape == (half_window + 1,), fft.shape
ps = np.abs(fft)**2
assert ps.shape == (half_window + 1,)
mel = librosa.filters.mel(sr, window, n_mels)
assert mel.shape == (n_mels, half_window + 1)
energy = np.dot(mel, ps)
assert energy.shape == (n_mels,)
logamplitude = 10 * np.log10(energy)
assert logamplitude.shape == (n_mels,)
dct_out = dct(logamplitude, type=3)
assert dct_out.shape == (n_mels,)
return(dct_out[1:(n_coeff + 1)])
# buffer_01 holds the first 2048 samples of the "bus.wav" file
sr, ys = scipy.io.wavfile.read("bus.wav")
buffer_01 = ys[0:2048]
mfcc_out = mfcc_col(buffer_01)
print('mfcc = ', mfcc_out[:])
| 26.190476 | 75 | 0.643636 | [
"MIT"
] | MahendraSondagar/STMicroelectronics | SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Middlewares/ST/STM32_AI_AudioPreprocessing_Library/Python/MFCC.py | 1,650 | Python |
# Generated by Django 3.2.7 on 2021-09-10 14:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AppState',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('play_state', models.CharField(choices=[('PLAY', 'Play'), ('STOP', 'Stop')], default='STOP', max_length=4)),
('health_state', models.CharField(choices=[('GOOD', 'Good'), ('BAD', 'Bad'), ('PEND', 'Pending')], default='PEND', max_length=4)),
],
),
migrations.CreateModel(
name='AssetSource',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('type', models.CharField(choices=[('JSON', 'Json'), ('SCRP', 'Scrape')], default='JSON', max_length=4)),
('post_title', models.CharField(max_length=200)),
('url', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_stamp', models.DateTimeField()),
('source', models.CharField(max_length=200)),
('type', models.IntegerField(choices=[(0, 'Log'), (1, 'Warn'), (2, 'Err')])),
('text', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='Asset',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.CharField(max_length=1000)),
('link', models.CharField(max_length=1000)),
('time_stamp', models.DateTimeField()),
('sent', models.BooleanField()),
('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.assetsource')),
],
),
]
| 43.946429 | 147 | 0.538399 | [
"MIT"
] | Polarts/UEMarketplaceAlertsBot | bot/migrations/0001_initial.py | 2,461 | Python |